after_merge
stringlengths
28
79.6k
before_merge
stringlengths
20
79.6k
url
stringlengths
38
71
full_traceback
stringlengths
43
922k
traceback_type
stringclasses
555 values
def set_length_validator(cls, v: "Optional[Set[T]]") -> "Optional[Set[T]]": if v is None: return None v = set_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.SetMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.SetMaxLengthError(limit_value=cls.max_items) return v
def set_length_validator( cls, v: "Optional[Set[T]]", field: "ModelField" ) -> "Optional[Set[T]]": v = set_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.SetMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.SetMaxLengthError(limit_value=cls.max_items) return v
https://github.com/samuelcolvin/pydantic/issues/2320
Info model set: network_segments=None Info model conset: network_segments=None Info settings set: network_segments=None Traceback (most recent call last): File "/home/mihai/bug.py", line 25, in <module> print(f"Info settings conset: {InfoSettingsConset()}") File "pydantic/env_settings.py", line 34, in pydantic.env_settings.BaseSettings.__init__ File "pydantic/main.py", line 362, in pydantic.main.BaseModel.__init__ pydantic.error_wrappers.ValidationError: 1 validation error for InfoSettingsConset network_segments value is not a valid set (type=type_error.set)
pydantic.error_wrappers.ValidationError
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 fields: Dict[str, ModelField] = {} config = BaseConfig validators: "ValidatorListDict" = {} pre_root_validators, post_root_validators = [], [] private_attributes: Dict[str, ModelPrivateAttr] = {} slots: SetStr = namespace.get("__slots__", ()) slots = {slots} if isinstance(slots, str) else set(slots) class_vars: SetStr = set() for base in reversed(bases): if ( _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel ): fields.update(smart_deepcopy(base.__fields__)) config = inherit_config(base.__config__, config) validators = inherit_validators(base.__validators__, validators) pre_root_validators += base.__pre_root_validators__ post_root_validators += base.__post_root_validators__ private_attributes.update(base.__private_attributes__) class_vars.update(base.__class_vars__) config = inherit_config(namespace.get("Config"), config) validators = inherit_validators(extract_validators(namespace), validators) vg = ValidatorGroup(validators) for f in fields.values(): f.set_config(config) extra_validators = vg.get_validators(f.name) if extra_validators: f.class_validators.update(extra_validators) # re-run prepare to add extra validators f.populate_validators() prepare_config(config, name) untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES def is_untouched(v: Any) -> bool: return ( isinstance(v, untouched_types) or v.__class__.__name__ == "cython_function_or_method" ) if (namespace.get("__module__"), namespace.get("__qualname__")) != ( "pydantic.main", "BaseModel", ): annotations = resolve_annotations( namespace.get("__annotations__", {}), namespace.get("__module__", None) ) # annotation only fields need to come first in fields for ann_name, ann_type in annotations.items(): if is_classvar(ann_type): class_vars.add(ann_name) elif is_valid_field(ann_name): validate_field_name(bases, ann_name) value = namespace.get(ann_name, Undefined) allowed_types = ( get_args(ann_type) if 
get_origin(ann_type) is Union else (ann_type,) ) if ( is_untouched(value) and ann_type != PyObject and not any( lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types ) ): continue fields[ann_name] = ModelField.infer( name=ann_name, value=value, annotation=ann_type, class_validators=vg.get_validators(ann_name), config=config, ) elif ann_name not in namespace and config.underscore_attrs_are_private: private_attributes[ann_name] = PrivateAttr() untouched_types = UNTOUCHED_TYPES + config.keep_untouched for var_name, value in namespace.items(): can_be_changed = var_name not in class_vars and not is_untouched(value) if isinstance(value, ModelPrivateAttr): if not is_valid_private_name(var_name): raise NameError( f'Private attributes "{var_name}" must not be a valid field name; ' f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"' ) private_attributes[var_name] = value elif ( config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed ): private_attributes[var_name] = PrivateAttr(default=value) elif ( is_valid_field(var_name) and var_name not in annotations and can_be_changed ): validate_field_name(bases, var_name) inferred = ModelField.infer( name=var_name, value=value, annotation=annotations.get(var_name, Undefined), class_validators=vg.get_validators(var_name), config=config, ) if var_name in fields and inferred.type_ != fields[var_name].type_: raise TypeError( f"The type of {name}.{var_name} differs from the new default value; " f"if you wish to change the type of this field, please use a type annotation" ) fields[var_name] = inferred _custom_root_type = ROOT_KEY in fields if _custom_root_type: validate_custom_root_type(fields) vg.check_for_unused() if config.json_encoders: json_encoder = partial(custom_pydantic_encoder, config.json_encoders) else: json_encoder = pydantic_encoder pre_rv_new, post_rv_new = extract_root_validators(namespace) exclude_from_namespace = fields | 
private_attributes.keys() | {"__slots__"} new_namespace = { "__config__": config, "__fields__": fields, "__validators__": vg.validators, "__pre_root_validators__": unique_list(pre_root_validators + pre_rv_new), "__post_root_validators__": unique_list(post_root_validators + post_rv_new), "__schema_cache__": {}, "__json_encoder__": staticmethod(json_encoder), "__custom_root_type__": _custom_root_type, "__private_attributes__": private_attributes, "__slots__": slots | private_attributes.keys(), "__class_vars__": class_vars, **{n: v for n, v in namespace.items() if n not in exclude_from_namespace}, } cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) # set __signature__ attr only for model class, but not for its instances cls.__signature__ = ClassAttribute( "__signature__", generate_model_signature(cls.__init__, fields, config) ) return cls
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 fields: Dict[str, ModelField] = {} config = BaseConfig validators: "ValidatorListDict" = {} pre_root_validators, post_root_validators = [], [] private_attributes: Dict[str, ModelPrivateAttr] = {} slots: SetStr = namespace.get("__slots__", ()) slots = {slots} if isinstance(slots, str) else set(slots) class_vars: SetStr = set() for base in reversed(bases): if ( _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel ): fields.update(smart_deepcopy(base.__fields__)) config = inherit_config(base.__config__, config) validators = inherit_validators(base.__validators__, validators) pre_root_validators += base.__pre_root_validators__ post_root_validators += base.__post_root_validators__ private_attributes.update(base.__private_attributes__) class_vars.update(base.__class_vars__) config = inherit_config(namespace.get("Config"), config) validators = inherit_validators(extract_validators(namespace), validators) vg = ValidatorGroup(validators) for f in fields.values(): f.set_config(config) extra_validators = vg.get_validators(f.name) if extra_validators: f.class_validators.update(extra_validators) # re-run prepare to add extra validators f.populate_validators() prepare_config(config, name) if (namespace.get("__module__"), namespace.get("__qualname__")) != ( "pydantic.main", "BaseModel", ): annotations = resolve_annotations( namespace.get("__annotations__", {}), namespace.get("__module__", None) ) # annotation only fields need to come first in fields for ann_name, ann_type in annotations.items(): if is_classvar(ann_type): class_vars.add(ann_name) elif is_valid_field(ann_name): validate_field_name(bases, ann_name) value = namespace.get(ann_name, Undefined) allowed_types = ( get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,) ) if ( isinstance(value, ANNOTATED_FIELD_UNTOUCHED_TYPES) and ann_type != PyObject and not any( lenient_issubclass(get_origin(allowed_type), Type) for 
allowed_type in allowed_types ) ): continue fields[ann_name] = ModelField.infer( name=ann_name, value=value, annotation=ann_type, class_validators=vg.get_validators(ann_name), config=config, ) elif ann_name not in namespace and config.underscore_attrs_are_private: private_attributes[ann_name] = PrivateAttr() untouched_types = UNTOUCHED_TYPES + config.keep_untouched for var_name, value in namespace.items(): can_be_changed = var_name not in class_vars and not isinstance( value, untouched_types ) if isinstance(value, ModelPrivateAttr): if not is_valid_private_name(var_name): raise NameError( f'Private attributes "{var_name}" must not be a valid field name; ' f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"' ) private_attributes[var_name] = value elif ( config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed ): private_attributes[var_name] = PrivateAttr(default=value) elif ( is_valid_field(var_name) and var_name not in annotations and can_be_changed ): validate_field_name(bases, var_name) inferred = ModelField.infer( name=var_name, value=value, annotation=annotations.get(var_name, Undefined), class_validators=vg.get_validators(var_name), config=config, ) if var_name in fields and inferred.type_ != fields[var_name].type_: raise TypeError( f"The type of {name}.{var_name} differs from the new default value; " f"if you wish to change the type of this field, please use a type annotation" ) fields[var_name] = inferred _custom_root_type = ROOT_KEY in fields if _custom_root_type: validate_custom_root_type(fields) vg.check_for_unused() if config.json_encoders: json_encoder = partial(custom_pydantic_encoder, config.json_encoders) else: json_encoder = pydantic_encoder pre_rv_new, post_rv_new = extract_root_validators(namespace) exclude_from_namespace = fields | private_attributes.keys() | {"__slots__"} new_namespace = { "__config__": config, "__fields__": fields, "__validators__": vg.validators, "__pre_root_validators__": 
unique_list(pre_root_validators + pre_rv_new), "__post_root_validators__": unique_list(post_root_validators + post_rv_new), "__schema_cache__": {}, "__json_encoder__": staticmethod(json_encoder), "__custom_root_type__": _custom_root_type, "__private_attributes__": private_attributes, "__slots__": slots | private_attributes.keys(), "__class_vars__": class_vars, **{n: v for n, v in namespace.items() if n not in exclude_from_namespace}, } cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) # set __signature__ attr only for model class, but not for its instances cls.__signature__ = ClassAttribute( "__signature__", generate_model_signature(cls.__init__, fields, config) ) return cls
https://github.com/samuelcolvin/pydantic/issues/1943
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "main.py", line 5, in init main class MyModel(BaseModel): File "pydantic/main.py", line 285, in pydantic.main.ModelMetaclass.__new__ File "pydantic/fields.py", line 309, in pydantic.fields.ModelField.infer File "pydantic/fields.py", line 271, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 351, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 529, in pydantic.fields.ModelField.populate_validators File "pydantic/validators.py", line 593, in find_validators RuntimeError: no validator found for <class 'cython_function_or_method'>, see `arbitrary_types_allowed` in Config
RuntimeError
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity) # typing interface is horrible, we have to do some ugly checks if lenient_issubclass(self.type_, JsonWrapper): self.type_ = self.type_.inner_type self.parse_json = True elif lenient_issubclass(self.type_, Json): self.type_ = Any self.parse_json = True elif isinstance(self.type_, TypeVar): if self.type_.__bound__: self.type_ = self.type_.__bound__ elif self.type_.__constraints__: self.type_ = Union[self.type_.__constraints__] else: self.type_ = Any elif is_new_type(self.type_): self.type_ = new_type_supertype(self.type_) if self.type_ is Any: if self.required is Undefined: self.required = False self.allow_none = True return elif self.type_ is Pattern: # python 3.7 only, Pattern is a typing object but without sub fields return elif is_literal_type(self.type_): return elif is_typeddict(self.type_): return origin = get_origin(self.type_) if origin is None: # field is not "typing" object eg. Union, Dict, List etc. # allow None for virtual superclasses of NoneType, e.g. Hashable if isinstance(self.type_, type) and isinstance(None, self.type_): self.allow_none = True return if origin is Callable: return if origin is Union: types_ = [] for type_ in get_args(self.type_): if type_ is NoneType: if self.required is Undefined: self.required = False self.allow_none = True continue types_.append(type_) if len(types_) == 1: # Optional[] self.type_ = types_[0] # this is the one case where the "outer type" isn't just the original type self.outer_type_ = self.type_ # re-run to correctly interpret the new self.type_ self._type_analysis() else: self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{display_as_type(t)}") for t in types_ ] return if issubclass(origin, Tuple): # type: ignore # origin == Tuple without item type args = get_args(self.type_) if not args: # plain tuple self.type_ = Any self.shape = SHAPE_TUPLE_ELLIPSIS elif len(args) == 2 and args[1] is Ellipsis: # e.g. Tuple[int, ...] 
self.type_ = args[0] self.shape = SHAPE_TUPLE_ELLIPSIS elif args == ((),): # Tuple[()] means empty tuple self.shape = SHAPE_TUPLE self.type_ = Any self.sub_fields = [] else: self.shape = SHAPE_TUPLE self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(args) ] return if issubclass(origin, List): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"list_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_LIST elif issubclass(origin, Set): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"set_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SET elif issubclass(origin, FrozenSet): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_FROZENSET elif issubclass(origin, Deque): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_DEQUE elif issubclass(origin, Sequence): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SEQUENCE elif issubclass(origin, Mapping): self.key_field = self._create_sub_type( get_args(self.type_)[0], "key_" + self.name, for_keys=True ) self.type_ = get_args(self.type_)[1] self.shape = SHAPE_MAPPING # Equality check as almost everything inherits form Iterable, including str # check for Iterable and CollectionsIterable, as it could receive one even when declared with the other elif origin in {Iterable, CollectionsIterable}: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_ITERABLE self.sub_fields = [self._create_sub_type(self.type_, f"{self.name}_type")] elif issubclass(origin, Type): # type: ignore return elif ( hasattr(origin, "__get_validators__") or self.model_config.arbitrary_types_allowed ): # Is a Pydantic-compatible generic 
that handles itself # or we have arbitrary_types_allowed = True self.shape = SHAPE_GENERIC self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(get_args(self.type_)) ] self.type_ = origin return else: raise TypeError(f'Fields of type "{origin}" are not supported.') # type_ has been refined eg. as the type of a List and sub_fields needs to be populated self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity) # typing interface is horrible, we have to do some ugly checks if lenient_issubclass(self.type_, JsonWrapper): self.type_ = self.type_.inner_type self.parse_json = True elif lenient_issubclass(self.type_, Json): self.type_ = Any self.parse_json = True elif isinstance(self.type_, TypeVar): if self.type_.__bound__: self.type_ = self.type_.__bound__ elif self.type_.__constraints__: self.type_ = Union[self.type_.__constraints__] else: self.type_ = Any elif is_new_type(self.type_): self.type_ = new_type_supertype(self.type_) if self.type_ is Any: if self.required is Undefined: self.required = False self.allow_none = True return elif self.type_ is Pattern: # python 3.7 only, Pattern is a typing object but without sub fields return elif is_literal_type(self.type_): return elif is_typeddict(self.type_): return origin = get_origin(self.type_) if origin is None: # field is not "typing" object eg. Union, Dict, List etc. # allow None for virtual superclasses of NoneType, e.g. 
Hashable if isinstance(self.type_, type) and isinstance(None, self.type_): self.allow_none = True return if origin is Callable: return if origin is Union: types_ = [] for type_ in get_args(self.type_): if type_ is NoneType: if self.required is Undefined: self.required = False self.allow_none = True continue types_.append(type_) if len(types_) == 1: # Optional[] self.type_ = types_[0] # this is the one case where the "outer type" isn't just the original type self.outer_type_ = self.type_ # re-run to correctly interpret the new self.type_ self._type_analysis() else: self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{display_as_type(t)}") for t in types_ ] return if issubclass(origin, Tuple): # type: ignore # origin == Tuple without item type if not get_args(self.type_): self.type_ = Any self.shape = SHAPE_TUPLE_ELLIPSIS else: self.shape = SHAPE_TUPLE self.sub_fields = [] for i, t in enumerate(get_args(self.type_)): if t is Ellipsis: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_TUPLE_ELLIPSIS return self.sub_fields.append(self._create_sub_type(t, f"{self.name}_{i}")) return if issubclass(origin, List): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"list_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_LIST elif issubclass(origin, Set): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"set_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SET elif issubclass(origin, FrozenSet): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_FROZENSET elif issubclass(origin, Deque): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_DEQUE elif issubclass(origin, Sequence): 
self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SEQUENCE elif issubclass(origin, Mapping): self.key_field = self._create_sub_type( get_args(self.type_)[0], "key_" + self.name, for_keys=True ) self.type_ = get_args(self.type_)[1] self.shape = SHAPE_MAPPING # Equality check as almost everything inherits form Iterable, including str # check for Iterable and CollectionsIterable, as it could receive one even when declared with the other elif origin in {Iterable, CollectionsIterable}: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_ITERABLE self.sub_fields = [self._create_sub_type(self.type_, f"{self.name}_type")] elif issubclass(origin, Type): # type: ignore return elif ( hasattr(origin, "__get_validators__") or self.model_config.arbitrary_types_allowed ): # Is a Pydantic-compatible generic that handles itself # or we have arbitrary_types_allowed = True self.shape = SHAPE_GENERIC self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(get_args(self.type_)) ] self.type_ = origin return else: raise TypeError(f'Fields of type "{origin}" are not supported.') # type_ has been refined eg. as the type of a List and sub_fields needs to be populated self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
https://github.com/samuelcolvin/pydantic/issues/2318
Traceback (most recent call last): File "pydantic/validators.py", line 615, in pydantic.validators.find_validators TypeError: issubclass() arg 1 must be a class During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "...", line 12, in <module> class Example(BaseModel): File "pydantic/main.py", line 262, in pydantic.main.ModelMetaclass.__new__ File "pydantic/fields.py", line 315, in pydantic.fields.ModelField.infer File "pydantic/fields.py", line 284, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 356, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 458, in pydantic.fields.ModelField._type_analysis File "pydantic/fields.py", line 516, in pydantic.fields.ModelField._create_sub_type File "pydantic/fields.py", line 284, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 362, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 538, in pydantic.fields.ModelField.populate_validators File "pydantic/validators.py", line 624, in find_validators RuntimeError: error checking inheritance of () (type: tuple)
TypeError
def __init__(self, function: "AnyCallableT", config: "ConfigType"): # noqa C901 from inspect import Parameter, signature parameters: Mapping[str, Parameter] = signature(function).parameters if parameters.keys() & { ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS, }: raise ConfigError( f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" ' f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator' ) self.raw_function = function self.arg_mapping: Dict[int, str] = {} self.positional_only_args = set() self.v_args_name = "args" self.v_kwargs_name = "kwargs" type_hints = get_type_hints(function) takes_args = False takes_kwargs = False fields: Dict[str, Tuple[Any, Any]] = {} for i, (name, p) in enumerate(parameters.items()): if p.annotation == p.empty: annotation = Any else: annotation = type_hints[name] default = ... if p.default == p.empty else p.default if p.kind == Parameter.POSITIONAL_ONLY: self.arg_mapping[i] = name fields[name] = annotation, default fields[V_POSITIONAL_ONLY_NAME] = List[str], None self.positional_only_args.add(name) elif p.kind == Parameter.POSITIONAL_OR_KEYWORD: self.arg_mapping[i] = name fields[name] = annotation, default fields[V_DUPLICATE_KWARGS] = List[str], None elif p.kind == Parameter.KEYWORD_ONLY: fields[name] = annotation, default elif p.kind == Parameter.VAR_POSITIONAL: self.v_args_name = name fields[name] = Tuple[annotation, ...], None takes_args = True else: assert p.kind == Parameter.VAR_KEYWORD, p.kind self.v_kwargs_name = name fields[name] = Dict[str, annotation], None # type: ignore takes_kwargs = True # these checks avoid a clash between "args" and a field with that name if not takes_args and self.v_args_name in fields: self.v_args_name = ALT_V_ARGS # same with "kwargs" if not takes_kwargs and self.v_kwargs_name in fields: self.v_kwargs_name = ALT_V_KWARGS if not takes_args: # we add the field so validation below can raise the correct 
exception fields[self.v_args_name] = List[Any], None if not takes_kwargs: # same with kwargs fields[self.v_kwargs_name] = Dict[Any, Any], None self.create_model(fields, takes_args, takes_kwargs, config)
def __init__(self, function: "AnyCallableT", config: "ConfigType"): # noqa C901 from inspect import Parameter, signature parameters: Mapping[str, Parameter] = signature(function).parameters if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME}: raise ConfigError( f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}" and "{V_POSITIONAL_ONLY_NAME}" are not permitted as argument ' f'names when using the "{validate_arguments.__name__}" decorator' ) self.raw_function = function self.arg_mapping: Dict[int, str] = {} self.positional_only_args = set() self.v_args_name = "args" self.v_kwargs_name = "kwargs" type_hints = get_type_hints(function) takes_args = False takes_kwargs = False fields: Dict[str, Tuple[Any, Any]] = {} for i, (name, p) in enumerate(parameters.items()): if p.annotation == p.empty: annotation = Any else: annotation = type_hints[name] default = ... if p.default == p.empty else p.default if p.kind == Parameter.POSITIONAL_ONLY: self.arg_mapping[i] = name fields[name] = annotation, default fields[V_POSITIONAL_ONLY_NAME] = List[str], None self.positional_only_args.add(name) elif p.kind == Parameter.POSITIONAL_OR_KEYWORD: self.arg_mapping[i] = name fields[name] = annotation, default elif p.kind == Parameter.KEYWORD_ONLY: fields[name] = annotation, default elif p.kind == Parameter.VAR_POSITIONAL: self.v_args_name = name fields[name] = Tuple[annotation, ...], None takes_args = True else: assert p.kind == Parameter.VAR_KEYWORD, p.kind self.v_kwargs_name = name fields[name] = Dict[str, annotation], None # type: ignore takes_kwargs = True # these checks avoid a clash between "args" and a field with that name if not takes_args and self.v_args_name in fields: self.v_args_name = ALT_V_ARGS # same with "kwargs" if not takes_kwargs and self.v_kwargs_name in fields: self.v_kwargs_name = ALT_V_KWARGS if not takes_args: # we add the field so validation below can raise the correct exception fields[self.v_args_name] = List[Any], None if not takes_kwargs: # same with kwargs 
fields[self.v_kwargs_name] = Dict[Any, Any], None self.create_model(fields, takes_args, takes_kwargs, config)
https://github.com/samuelcolvin/pydantic/issues/2249
from pydantic import validate_arguments @validate_arguments def foo(x: int): print(x) def bar(x: int): print(x) foo(1) 1 foo(1, x=2) 2 bar(1) 1 bar(1, x=2) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: bar() got multiple values for argument 'x'
TypeError
def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]: values: Dict[str, Any] = {} if args: arg_iter = enumerate(args) while True: try: i, a = next(arg_iter) except StopIteration: break arg_name = self.arg_mapping.get(i) if arg_name is not None: values[arg_name] = a else: values[self.v_args_name] = [a] + [a for _, a in arg_iter] break var_kwargs = {} wrong_positional_args = [] duplicate_kwargs = [] non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name} for k, v in kwargs.items(): if k in non_var_fields: if k in self.positional_only_args: wrong_positional_args.append(k) if k in values: duplicate_kwargs.append(k) values[k] = v else: var_kwargs[k] = v if var_kwargs: values[self.v_kwargs_name] = var_kwargs if wrong_positional_args: values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args if duplicate_kwargs: values[V_DUPLICATE_KWARGS] = duplicate_kwargs return values
def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]: values: Dict[str, Any] = {} if args: arg_iter = enumerate(args) while True: try: i, a = next(arg_iter) except StopIteration: break arg_name = self.arg_mapping.get(i) if arg_name is not None: values[arg_name] = a else: values[self.v_args_name] = [a] + [a for _, a in arg_iter] break var_kwargs = {} wrong_positional_args = [] non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name} for k, v in kwargs.items(): if k in non_var_fields: if k in self.positional_only_args: wrong_positional_args.append(k) values[k] = v else: var_kwargs[k] = v if var_kwargs: values[self.v_kwargs_name] = var_kwargs if wrong_positional_args: values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args return values
https://github.com/samuelcolvin/pydantic/issues/2249
from pydantic import validate_arguments @validate_arguments def foo(x: int): print(x) def bar(x: int): print(x) foo(1) 1 foo(1, x=2) 2 bar(1) 1 bar(1, x=2) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: bar() got multiple values for argument 'x'
TypeError
def create_model( self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: "ConfigType", ) -> None: pos_args = len(self.arg_mapping) class CustomConfig: pass if not TYPE_CHECKING: # pragma: no branch if isinstance(config, dict): CustomConfig = type("Config", (), config) # noqa: F811 elif config is not None: CustomConfig = config # noqa: F811 if hasattr(CustomConfig, "fields") or hasattr(CustomConfig, "alias_generator"): raise ConfigError( 'Setting the "fields" and "alias_generator" property on custom Config for ' "@validate_arguments is not yet supported, please remove." ) class DecoratorBaseModel(BaseModel): @validator(self.v_args_name, check_fields=False, allow_reuse=True) def check_args(cls, v: List[Any]) -> List[Any]: if takes_args: return v raise TypeError( f"{pos_args} positional arguments expected but {pos_args + len(v)} given" ) @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True) def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]: if takes_kwargs: return v plural = "" if len(v) == 1 else "s" keys = ", ".join(map(repr, v.keys())) raise TypeError(f"unexpected keyword argument{plural}: {keys}") @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True) def check_positional_only(cls, v: List[str]) -> None: plural = "" if len(v) == 1 else "s" keys = ", ".join(map(repr, v)) raise TypeError( f"positional-only argument{plural} passed as keyword argument{plural}: {keys}" ) @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True) def check_duplicate_kwargs(cls, v: List[str]) -> None: plural = "" if len(v) == 1 else "s" keys = ", ".join(map(repr, v)) raise TypeError(f"multiple values for argument{plural}: {keys}") class Config(CustomConfig): extra = Extra.forbid self.model = create_model( to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields )
def create_model( self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: "ConfigType", ) -> None: pos_args = len(self.arg_mapping) class CustomConfig: pass if not TYPE_CHECKING: # pragma: no branch if isinstance(config, dict): CustomConfig = type("Config", (), config) # noqa: F811 elif config is not None: CustomConfig = config # noqa: F811 if hasattr(CustomConfig, "fields") or hasattr(CustomConfig, "alias_generator"): raise ConfigError( 'Setting the "fields" and "alias_generator" property on custom Config for ' "@validate_arguments is not yet supported, please remove." ) class DecoratorBaseModel(BaseModel): @validator(self.v_args_name, check_fields=False, allow_reuse=True) def check_args(cls, v: List[Any]) -> List[Any]: if takes_args: return v raise TypeError( f"{pos_args} positional arguments expected but {pos_args + len(v)} given" ) @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True) def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]: if takes_kwargs: return v plural = "" if len(v) == 1 else "s" keys = ", ".join(map(repr, v.keys())) raise TypeError(f"unexpected keyword argument{plural}: {keys}") @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True) def check_positional_only(cls, v: List[str]) -> None: plural = "" if len(v) == 1 else "s" keys = ", ".join(map(repr, v)) raise TypeError( f"positional-only argument{plural} passed as keyword argument{plural}: {keys}" ) class Config(CustomConfig): extra = Extra.forbid self.model = create_model( to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields )
https://github.com/samuelcolvin/pydantic/issues/2249
from pydantic import validate_arguments @validate_arguments def foo(x: int): print(x) def bar(x: int): print(x) foo(1) 1 foo(1, x=2) 2 bar(1) 1 bar(1, x=2) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: bar() got multiple values for argument 'x'
TypeError
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 fields: Dict[str, ModelField] = {} config = BaseConfig validators: "ValidatorListDict" = {} pre_root_validators, post_root_validators = [], [] private_attributes: Dict[str, ModelPrivateAttr] = {} slots: Set[str] = namespace.get("__slots__", ()) slots = {slots} if isinstance(slots, str) else set(slots) for base in reversed(bases): if ( _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel ): fields.update(smart_deepcopy(base.__fields__)) config = inherit_config(base.__config__, config) validators = inherit_validators(base.__validators__, validators) pre_root_validators += base.__pre_root_validators__ post_root_validators += base.__post_root_validators__ private_attributes.update(base.__private_attributes__) config = inherit_config(namespace.get("Config"), config) validators = inherit_validators(extract_validators(namespace), validators) vg = ValidatorGroup(validators) for f in fields.values(): f.set_config(config) extra_validators = vg.get_validators(f.name) if extra_validators: f.class_validators.update(extra_validators) # re-run prepare to add extra validators f.populate_validators() prepare_config(config, name) class_vars = set() if (namespace.get("__module__"), namespace.get("__qualname__")) != ( "pydantic.main", "BaseModel", ): annotations = resolve_annotations( namespace.get("__annotations__", {}), namespace.get("__module__", None) ) # annotation only fields need to come first in fields for ann_name, ann_type in annotations.items(): if is_classvar(ann_type): class_vars.add(ann_name) elif is_valid_field(ann_name): validate_field_name(bases, ann_name) value = namespace.get(ann_name, Undefined) allowed_types = ( get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,) ) if ( isinstance(value, ANNOTATED_FIELD_UNTOUCHED_TYPES) and ann_type != PyObject and not any( lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types ) ): continue 
fields[ann_name] = ModelField.infer( name=ann_name, value=value, annotation=ann_type, class_validators=vg.get_validators(ann_name), config=config, ) elif ann_name not in namespace and config.underscore_attrs_are_private: private_attributes[ann_name] = PrivateAttr() untouched_types = UNTOUCHED_TYPES + config.keep_untouched for var_name, value in namespace.items(): can_be_changed = var_name not in class_vars and not isinstance( value, untouched_types ) if isinstance(value, ModelPrivateAttr): if not is_valid_private_name(var_name): raise NameError( f'Private attributes "{var_name}" must not be a valid field name; ' f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"' ) private_attributes[var_name] = value elif ( config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed ): private_attributes[var_name] = PrivateAttr(default=value) elif ( is_valid_field(var_name) and var_name not in annotations and can_be_changed ): validate_field_name(bases, var_name) inferred = ModelField.infer( name=var_name, value=value, annotation=annotations.get(var_name, Undefined), class_validators=vg.get_validators(var_name), config=config, ) if var_name in fields and inferred.type_ != fields[var_name].type_: raise TypeError( f"The type of {name}.{var_name} differs from the new default value; " f"if you wish to change the type of this field, please use a type annotation" ) fields[var_name] = inferred _custom_root_type = ROOT_KEY in fields if _custom_root_type: validate_custom_root_type(fields) vg.check_for_unused() if config.json_encoders: json_encoder = partial(custom_pydantic_encoder, config.json_encoders) else: json_encoder = pydantic_encoder pre_rv_new, post_rv_new = extract_root_validators(namespace) exclude_from_namespace = fields | private_attributes.keys() | {"__slots__"} new_namespace = { "__config__": config, "__fields__": fields, "__validators__": vg.validators, "__pre_root_validators__": unique_list(pre_root_validators + pre_rv_new), 
"__post_root_validators__": unique_list(post_root_validators + post_rv_new), "__schema_cache__": {}, "__json_encoder__": staticmethod(json_encoder), "__custom_root_type__": _custom_root_type, "__private_attributes__": private_attributes, "__slots__": slots | private_attributes.keys(), **{n: v for n, v in namespace.items() if n not in exclude_from_namespace}, } cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) # set __signature__ attr only for model class, but not for its instances cls.__signature__ = ClassAttribute( "__signature__", generate_model_signature(cls.__init__, fields, config) ) return cls
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 fields: Dict[str, ModelField] = {} config = BaseConfig validators: "ValidatorListDict" = {} pre_root_validators, post_root_validators = [], [] private_attributes: Dict[str, ModelPrivateAttr] = {} slots: Set[str] = namespace.get("__slots__", ()) slots = {slots} if isinstance(slots, str) else set(slots) for base in reversed(bases): if ( _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel ): fields.update(smart_deepcopy(base.__fields__)) config = inherit_config(base.__config__, config) validators = inherit_validators(base.__validators__, validators) pre_root_validators += base.__pre_root_validators__ post_root_validators += base.__post_root_validators__ private_attributes.update(base.__private_attributes__) config = inherit_config(namespace.get("Config"), config) validators = inherit_validators(extract_validators(namespace), validators) vg = ValidatorGroup(validators) for f in fields.values(): f.set_config(config) extra_validators = vg.get_validators(f.name) if extra_validators: f.class_validators.update(extra_validators) # re-run prepare to add extra validators f.populate_validators() prepare_config(config, name) class_vars = set() if (namespace.get("__module__"), namespace.get("__qualname__")) != ( "pydantic.main", "BaseModel", ): annotations = resolve_annotations( namespace.get("__annotations__", {}), namespace.get("__module__", None) ) untouched_types = UNTOUCHED_TYPES + config.keep_untouched # annotation only fields need to come first in fields for ann_name, ann_type in annotations.items(): if is_classvar(ann_type): class_vars.add(ann_name) elif is_valid_field(ann_name): validate_field_name(bases, ann_name) value = namespace.get(ann_name, Undefined) allowed_types = ( get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,) ) if ( isinstance(value, untouched_types) and ann_type != PyObject and not any( lenient_issubclass(get_origin(allowed_type), Type) for 
allowed_type in allowed_types ) ): continue fields[ann_name] = inferred = ModelField.infer( name=ann_name, value=value, annotation=ann_type, class_validators=vg.get_validators(ann_name), config=config, ) elif ann_name not in namespace and config.underscore_attrs_are_private: private_attributes[ann_name] = PrivateAttr() for var_name, value in namespace.items(): can_be_changed = var_name not in class_vars and not isinstance( value, untouched_types ) if isinstance(value, ModelPrivateAttr): if not is_valid_private_name(var_name): raise NameError( f'Private attributes "{var_name}" must not be a valid field name; ' f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"' ) private_attributes[var_name] = value elif ( config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed ): private_attributes[var_name] = PrivateAttr(default=value) elif ( is_valid_field(var_name) and var_name not in annotations and can_be_changed ): validate_field_name(bases, var_name) inferred = ModelField.infer( name=var_name, value=value, annotation=annotations.get(var_name, Undefined), class_validators=vg.get_validators(var_name), config=config, ) if var_name in fields and inferred.type_ != fields[var_name].type_: raise TypeError( f"The type of {name}.{var_name} differs from the new default value; " f"if you wish to change the type of this field, please use a type annotation" ) fields[var_name] = inferred _custom_root_type = ROOT_KEY in fields if _custom_root_type: validate_custom_root_type(fields) vg.check_for_unused() if config.json_encoders: json_encoder = partial(custom_pydantic_encoder, config.json_encoders) else: json_encoder = pydantic_encoder pre_rv_new, post_rv_new = extract_root_validators(namespace) exclude_from_namespace = fields | private_attributes.keys() | {"__slots__"} new_namespace = { "__config__": config, "__fields__": fields, "__validators__": vg.validators, "__pre_root_validators__": unique_list(pre_root_validators + pre_rv_new), 
"__post_root_validators__": unique_list(post_root_validators + post_rv_new), "__schema_cache__": {}, "__json_encoder__": staticmethod(json_encoder), "__custom_root_type__": _custom_root_type, "__private_attributes__": private_attributes, "__slots__": slots | private_attributes.keys(), **{n: v for n, v in namespace.items() if n not in exclude_from_namespace}, } cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) # set __signature__ attr only for model class, but not for its instances cls.__signature__ = ClassAttribute( "__signature__", generate_model_signature(cls.__init__, fields, config) ) return cls
https://github.com/samuelcolvin/pydantic/issues/2086
from pydantic import BaseModel, Field from typing import Callable class Foo(BaseModel): ... # without default value, getting user warning ... callback: Callable[[], dict] ... Foo.schema() __main__:1: UserWarning: Callable callback was excluded from schema since JSON schema has no equivalent type. {'title': 'Foo', 'type': 'object', 'properties': {}} class FooDefault(BaseModel): ... callback: Callable[[], dict] = Field(default=lambda: {'some': 'data'}) FooDefault.schema() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pydantic/main.py", line 637, in pydantic.main.BaseModel.schema File "pydantic/schema.py", line 153, in pydantic.schema.model_schema File "pydantic/schema.py", line 523, in pydantic.schema.model_process_schema File "pydantic/schema.py", line 564, in pydantic.schema.model_type_schema File "pydantic/schema.py", line 215, in pydantic.schema.field_schema File "pydantic/schema.py", line 183, in pydantic.schema.get_field_info_schema File "pydantic/schema.py", line 846, in pydantic.schema.encode_default File "pydantic/json.py", line 65, in pydantic.json.pydantic_encoder TypeError: Object of type 'function' is not JSON serializable
TypeError
def get_field_info_schema(field: ModelField) -> Tuple[Dict[str, Any], bool]: schema_overrides = False # If no title is explicitly set, we don't set title in the schema for enums. # The behaviour is the same as `BaseModel` reference, where the default title # is in the definitions part of the schema. schema: Dict[str, Any] = {} if field.field_info.title or not lenient_issubclass(field.type_, Enum): schema["title"] = field.field_info.title or field.alias.title().replace( "_", " " ) if field.field_info.title: schema_overrides = True if field.field_info.description: schema["description"] = field.field_info.description schema_overrides = True if ( not field.required and not field.field_info.const and field.default is not None and not is_callable_type(field.outer_type_) ): schema["default"] = encode_default(field.default) schema_overrides = True return schema, schema_overrides
def get_field_info_schema(field: ModelField) -> Tuple[Dict[str, Any], bool]: schema_overrides = False # If no title is explicitly set, we don't set title in the schema for enums. # The behaviour is the same as `BaseModel` reference, where the default title # is in the definitions part of the schema. schema: Dict[str, Any] = {} if field.field_info.title or not lenient_issubclass(field.type_, Enum): schema["title"] = field.field_info.title or field.alias.title().replace( "_", " " ) if field.field_info.title: schema_overrides = True if field.field_info.description: schema["description"] = field.field_info.description schema_overrides = True if not field.required and not field.field_info.const and field.default is not None: schema["default"] = encode_default(field.default) schema_overrides = True return schema, schema_overrides
https://github.com/samuelcolvin/pydantic/issues/2086
from pydantic import BaseModel, Field from typing import Callable class Foo(BaseModel): ... # without default value, getting user warning ... callback: Callable[[], dict] ... Foo.schema() __main__:1: UserWarning: Callable callback was excluded from schema since JSON schema has no equivalent type. {'title': 'Foo', 'type': 'object', 'properties': {}} class FooDefault(BaseModel): ... callback: Callable[[], dict] = Field(default=lambda: {'some': 'data'}) FooDefault.schema() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pydantic/main.py", line 637, in pydantic.main.BaseModel.schema File "pydantic/schema.py", line 153, in pydantic.schema.model_schema File "pydantic/schema.py", line 523, in pydantic.schema.model_process_schema File "pydantic/schema.py", line 564, in pydantic.schema.model_type_schema File "pydantic/schema.py", line 215, in pydantic.schema.field_schema File "pydantic/schema.py", line 183, in pydantic.schema.get_field_info_schema File "pydantic/schema.py", line 846, in pydantic.schema.encode_default File "pydantic/json.py", line 65, in pydantic.json.pydantic_encoder TypeError: Object of type 'function' is not JSON serializable
TypeError
def _process_class( _cls: Type[Any], init: bool, repr: bool, eq: bool, order: bool, unsafe_hash: bool, frozen: bool, config: Optional[Type[Any]], ) -> Type["Dataclass"]: import dataclasses post_init_original = getattr(_cls, "__post_init__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None if not post_init_original: post_init_original = getattr(_cls, "__post_init_original__", None) post_init_post_parse = getattr(_cls, "__post_init_post_parse__", None) def _pydantic_post_init(self: "Dataclass", *initvars: Any) -> None: if post_init_original is not None: post_init_original(self, *initvars) d, _, validation_error = validate_model( self.__pydantic_model__, self.__dict__, cls=self.__class__ ) if validation_error: raise validation_error object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if post_init_post_parse is not None: post_init_post_parse(self, *initvars) # If the class is already a dataclass, __post_init__ will not be called automatically # so no validation will be added. 
# We hence create dynamically a new dataclass: # ``` # @dataclasses.dataclass # class NewClass(_cls): # __post_init__ = _pydantic_post_init # ``` # with the exact same fields as the base dataclass # and register it on module level to address pickle problem: # https://github.com/samuelcolvin/pydantic/issues/2111 if is_builtin_dataclass(_cls): uniq_class_name = f"_Pydantic_{_cls.__name__}_{id(_cls)}" _cls = type( # for pretty output new class will have the name as original _cls.__name__, (_cls,), { "__annotations__": _cls.__annotations__, "__post_init__": _pydantic_post_init, # attrs for pickle to find this class "__module__": __name__, "__qualname__": uniq_class_name, }, ) globals()[uniq_class_name] = _cls else: _cls.__post_init__ = _pydantic_post_init cls: Type["Dataclass"] = dataclasses.dataclass( # type: ignore _cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen, ) cls.__processed__ = ClassAttribute("__processed__", True) fields: Dict[str, Any] = {} for field in dataclasses.fields(cls): if field.default != dataclasses.MISSING: field_value = field.default # mypy issue 7020 and 708 elif field.default_factory != dataclasses.MISSING: # type: ignore field_value = field.default_factory() # type: ignore else: field_value = Required fields[field.name] = (field.type, field_value) validators = gather_all_validators(cls) cls.__pydantic_model__ = create_model( cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields, ) cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment] cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment] if post_init_original: cls.__post_init_original__ = post_init_original if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment] return cls
def _process_class( _cls: Type[Any], init: bool, repr: bool, eq: bool, order: bool, unsafe_hash: bool, frozen: bool, config: Optional[Type[Any]], ) -> Type["Dataclass"]: import dataclasses post_init_original = getattr(_cls, "__post_init__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None if not post_init_original: post_init_original = getattr(_cls, "__post_init_original__", None) post_init_post_parse = getattr(_cls, "__post_init_post_parse__", None) def _pydantic_post_init(self: "Dataclass", *initvars: Any) -> None: if post_init_original is not None: post_init_original(self, *initvars) d, _, validation_error = validate_model( self.__pydantic_model__, self.__dict__, cls=self.__class__ ) if validation_error: raise validation_error object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if post_init_post_parse is not None: post_init_post_parse(self, *initvars) # If the class is already a dataclass, __post_init__ will not be called automatically # so no validation will be added. 
# We hence create dynamically a new dataclass: # ``` # @dataclasses.dataclass # class NewClass(_cls): # __post_init__ = _pydantic_post_init # ``` # with the exact same fields as the base dataclass if is_builtin_dataclass(_cls): _cls = type( _cls.__name__, (_cls,), { "__annotations__": _cls.__annotations__, "__post_init__": _pydantic_post_init, }, ) else: _cls.__post_init__ = _pydantic_post_init cls: Type["Dataclass"] = dataclasses.dataclass( # type: ignore _cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen, ) cls.__processed__ = ClassAttribute("__processed__", True) fields: Dict[str, Any] = {} for field in dataclasses.fields(cls): if field.default != dataclasses.MISSING: field_value = field.default # mypy issue 7020 and 708 elif field.default_factory != dataclasses.MISSING: # type: ignore field_value = field.default_factory() # type: ignore else: field_value = Required fields[field.name] = (field.type, field_value) validators = gather_all_validators(cls) cls.__pydantic_model__ = create_model( cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields, ) cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment] cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment] if post_init_original: cls.__post_init_original__ = post_init_original if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment] return cls
https://github.com/samuelcolvin/pydantic/issues/2111
--------------------------------------------------------------------------- PicklingError Traceback (most recent call last) <ipython-input-14-7d67a44cf353> in <module> 15 16 print(pickle.dumps( ---> 17 PydanticModel(built_in_dataclass=BuiltInDataclass(value=0)) 18 )) PicklingError: Can't pickle <class '__main__.BuiltInDataclass'>: it's not the same object as __main__.BuiltInDataclass
PicklingError
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity) # typing interface is horrible, we have to do some ugly checks if lenient_issubclass(self.type_, JsonWrapper): self.type_ = self.type_.inner_type self.parse_json = True elif lenient_issubclass(self.type_, Json): self.type_ = Any self.parse_json = True elif isinstance(self.type_, TypeVar): if self.type_.__bound__: self.type_ = self.type_.__bound__ elif self.type_.__constraints__: self.type_ = Union[self.type_.__constraints__] else: self.type_ = Any elif is_new_type(self.type_): self.type_ = new_type_supertype(self.type_) if self.type_ is Any: if self.required is Undefined: self.required = False self.allow_none = True return elif self.type_ is Pattern: # python 3.7 only, Pattern is a typing object but without sub fields return elif is_literal_type(self.type_): return origin = get_origin(self.type_) if origin is None: # field is not "typing" object eg. Union, Dict, List etc. # allow None for virtual superclasses of NoneType, e.g. 
Hashable if isinstance(self.type_, type) and isinstance(None, self.type_): self.allow_none = True return if origin is Callable: return if origin is Union: types_ = [] for type_ in get_args(self.type_): if type_ is NoneType: if self.required is Undefined: self.required = False self.allow_none = True continue types_.append(type_) if len(types_) == 1: # Optional[] self.type_ = types_[0] # this is the one case where the "outer type" isn't just the original type self.outer_type_ = self.type_ # re-run to correctly interpret the new self.type_ self._type_analysis() else: self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{display_as_type(t)}") for t in types_ ] return if issubclass(origin, Tuple): # type: ignore # origin == Tuple without item type if not get_args(self.type_): self.type_ = Any self.shape = SHAPE_TUPLE_ELLIPSIS else: self.shape = SHAPE_TUPLE self.sub_fields = [] for i, t in enumerate(get_args(self.type_)): if t is Ellipsis: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_TUPLE_ELLIPSIS return self.sub_fields.append(self._create_sub_type(t, f"{self.name}_{i}")) return if issubclass(origin, List): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"list_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_LIST elif issubclass(origin, Set): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"set_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SET elif issubclass(origin, FrozenSet): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_FROZENSET elif issubclass(origin, Deque): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_DEQUE elif issubclass(origin, Sequence): 
self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SEQUENCE elif issubclass(origin, Mapping): self.key_field = self._create_sub_type( get_args(self.type_)[0], "key_" + self.name, for_keys=True ) self.type_ = get_args(self.type_)[1] self.shape = SHAPE_MAPPING # Equality check as almost everything inherits form Iterable, including str # check for Iterable and CollectionsIterable, as it could receive one even when declared with the other elif origin in {Iterable, CollectionsIterable}: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_ITERABLE self.sub_fields = [self._create_sub_type(self.type_, f"{self.name}_type")] elif issubclass(origin, Type): # type: ignore return elif ( hasattr(origin, "__get_validators__") or self.model_config.arbitrary_types_allowed ): # Is a Pydantic-compatible generic that handles itself # or we have arbitrary_types_allowed = True self.shape = SHAPE_GENERIC self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(get_args(self.type_)) ] self.type_ = origin return else: raise TypeError(f'Fields of type "{origin}" are not supported.') # type_ has been refined eg. as the type of a List and sub_fields needs to be populated self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity) # typing interface is horrible, we have to do some ugly checks if lenient_issubclass(self.type_, JsonWrapper): self.type_ = self.type_.inner_type self.parse_json = True elif lenient_issubclass(self.type_, Json): self.type_ = Any self.parse_json = True elif isinstance(self.type_, TypeVar): if self.type_.__bound__: self.type_ = self.type_.__bound__ elif self.type_.__constraints__: self.type_ = Union[self.type_.__constraints__] else: self.type_ = Any elif is_new_type(self.type_): self.type_ = new_type_supertype(self.type_) if self.type_ is Any: if self.required is Undefined: self.required = False self.allow_none = True return elif self.type_ is Pattern: # python 3.7 only, Pattern is a typing object but without sub fields return elif is_literal_type(self.type_): return origin = get_origin(self.type_) if origin is None: # field is not "typing" object eg. Union, Dict, List etc. # allow None for virtual superclasses of NoneType, e.g. 
Hashable if isinstance(self.type_, type) and isinstance(None, self.type_): self.allow_none = True return if origin is Callable: return if origin is Union: types_ = [] for type_ in get_args(self.type_): if type_ is NoneType: if self.required is Undefined: self.required = False self.allow_none = True continue types_.append(type_) if len(types_) == 1: # Optional[] self.type_ = types_[0] # this is the one case where the "outer type" isn't just the original type self.outer_type_ = self.type_ # re-run to correctly interpret the new self.type_ self._type_analysis() else: self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{display_as_type(t)}") for t in types_ ] return if issubclass(origin, Tuple): # type: ignore self.shape = SHAPE_TUPLE self.sub_fields = [] for i, t in enumerate(get_args(self.type_)): if t is Ellipsis: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_TUPLE_ELLIPSIS return self.sub_fields.append(self._create_sub_type(t, f"{self.name}_{i}")) return if issubclass(origin, List): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"list_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_LIST elif issubclass(origin, Set): # Create self validators get_validators = getattr(self.type_, "__get_validators__", None) if get_validators: self.class_validators.update( { f"set_{i}": Validator(validator, pre=True) for i, validator in enumerate(get_validators()) } ) self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SET elif issubclass(origin, FrozenSet): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_FROZENSET elif issubclass(origin, Deque): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_DEQUE elif issubclass(origin, Sequence): self.type_ = get_args(self.type_)[0] self.shape = SHAPE_SEQUENCE elif issubclass(origin, Mapping): self.key_field = 
self._create_sub_type( get_args(self.type_)[0], "key_" + self.name, for_keys=True ) self.type_ = get_args(self.type_)[1] self.shape = SHAPE_MAPPING # Equality check as almost everything inherits form Iterable, including str # check for Iterable and CollectionsIterable, as it could receive one even when declared with the other elif origin in {Iterable, CollectionsIterable}: self.type_ = get_args(self.type_)[0] self.shape = SHAPE_ITERABLE self.sub_fields = [self._create_sub_type(self.type_, f"{self.name}_type")] elif issubclass(origin, Type): # type: ignore return elif ( hasattr(origin, "__get_validators__") or self.model_config.arbitrary_types_allowed ): # Is a Pydantic-compatible generic that handles itself # or we have arbitrary_types_allowed = True self.shape = SHAPE_GENERIC self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(get_args(self.type_)) ] self.type_ = origin return else: raise TypeError(f'Fields of type "{origin}" are not supported.') # type_ has been refined eg. as the type of a List and sub_fields needs to be populated self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
https://github.com/samuelcolvin/pydantic/issues/2132
Traceback (most recent call last): File "pydantic/validators.py", line 615, in pydantic.validators.find_validators TypeError: issubclass() arg 1 must be a class During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<input>", line 2, in <module> File "pydantic/dataclasses.py", line 216, in pydantic.dataclasses.dataclass File "pydantic/dataclasses.py", line 211, in pydantic.dataclasses.dataclass.wrap File "pydantic/dataclasses.py", line 147, in pydantic.dataclasses._process_class File "pydantic/main.py", line 925, in pydantic.main.create_model File "pydantic/main.py", line 262, in pydantic.main.ModelMetaclass.__new__ File "pydantic/fields.py", line 315, in pydantic.fields.ModelField.infer File "pydantic/fields.py", line 284, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 362, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 538, in pydantic.fields.ModelField.populate_validators File "pydantic/validators.py", line 624, in find_validators RuntimeError: error checking inheritance of typing.Tuple (type: Tuple)
TypeError
def _validate_sequence_like( # noqa: C901 (ignore complexity) self, v: Any, values: Dict[str, Any], loc: "LocStr", cls: Optional["ModelOrDc"] ) -> "ValidateReturn": """ Validate sequence-like containers: lists, tuples, sets and generators Note that large if-else blocks are necessary to enable Cython optimization, which is why we disable the complexity check above. """ if not sequence_like(v): e: errors_.PydanticTypeError if self.shape == SHAPE_LIST: e = errors_.ListError() elif self.shape in (SHAPE_TUPLE, SHAPE_TUPLE_ELLIPSIS): e = errors_.TupleError() elif self.shape == SHAPE_SET: e = errors_.SetError() elif self.shape == SHAPE_FROZENSET: e = errors_.FrozenSetError() else: e = errors_.SequenceError() return v, ErrorWrapper(e, loc) loc = loc if isinstance(loc, tuple) else (loc,) result = [] errors: List[ErrorList] = [] for i, v_ in enumerate(v): v_loc = *loc, i r, ee = self._validate_singleton(v_, values, v_loc, cls) if ee: errors.append(ee) else: result.append(r) if errors: return v, errors converted: Union[ List[Any], Set[Any], FrozenSet[Any], Tuple[Any, ...], Iterator[Any], Deque[Any] ] = result if self.shape == SHAPE_SET: converted = set(result) elif self.shape == SHAPE_FROZENSET: converted = frozenset(result) elif self.shape == SHAPE_TUPLE_ELLIPSIS: converted = tuple(result) elif self.shape == SHAPE_DEQUE: converted = deque(result) elif self.shape == SHAPE_SEQUENCE: if isinstance(v, tuple): converted = tuple(result) elif isinstance(v, set): converted = set(result) elif isinstance(v, Generator): converted = iter(result) elif isinstance(v, deque): converted = deque(result) return converted, None
def _validate_sequence_like( # noqa: C901 (ignore complexity) self, v: Any, values: Dict[str, Any], loc: "LocStr", cls: Optional["ModelOrDc"] ) -> "ValidateReturn": """ Validate sequence-like containers: lists, tuples, sets and generators Note that large if-else blocks are necessary to enable Cython optimization, which is why we disable the complexity check above. """ if not sequence_like(v): e: errors_.PydanticTypeError if self.shape == SHAPE_LIST: e = errors_.ListError() elif self.shape == SHAPE_SET: e = errors_.SetError() elif self.shape == SHAPE_FROZENSET: e = errors_.FrozenSetError() else: e = errors_.SequenceError() return v, ErrorWrapper(e, loc) loc = loc if isinstance(loc, tuple) else (loc,) result = [] errors: List[ErrorList] = [] for i, v_ in enumerate(v): v_loc = *loc, i r, ee = self._validate_singleton(v_, values, v_loc, cls) if ee: errors.append(ee) else: result.append(r) if errors: return v, errors converted: Union[ List[Any], Set[Any], FrozenSet[Any], Tuple[Any, ...], Iterator[Any], Deque[Any] ] = result if self.shape == SHAPE_SET: converted = set(result) elif self.shape == SHAPE_FROZENSET: converted = frozenset(result) elif self.shape == SHAPE_TUPLE_ELLIPSIS: converted = tuple(result) elif self.shape == SHAPE_DEQUE: converted = deque(result) elif self.shape == SHAPE_SEQUENCE: if isinstance(v, tuple): converted = tuple(result) elif isinstance(v, set): converted = set(result) elif isinstance(v, Generator): converted = iter(result) elif isinstance(v, deque): converted = deque(result) return converted, None
https://github.com/samuelcolvin/pydantic/issues/2132
Traceback (most recent call last): File "pydantic/validators.py", line 615, in pydantic.validators.find_validators TypeError: issubclass() arg 1 must be a class During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<input>", line 2, in <module> File "pydantic/dataclasses.py", line 216, in pydantic.dataclasses.dataclass File "pydantic/dataclasses.py", line 211, in pydantic.dataclasses.dataclass.wrap File "pydantic/dataclasses.py", line 147, in pydantic.dataclasses._process_class File "pydantic/main.py", line 925, in pydantic.main.create_model File "pydantic/main.py", line 262, in pydantic.main.ModelMetaclass.__new__ File "pydantic/fields.py", line 315, in pydantic.fields.ModelField.infer File "pydantic/fields.py", line 284, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 362, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 538, in pydantic.fields.ModelField.populate_validators File "pydantic/validators.py", line 624, in find_validators RuntimeError: error checking inheritance of typing.Tuple (type: Tuple)
TypeError
def is_valid_private_name(name: str) -> bool: return not is_valid_field(name) and name not in { "__annotations__", "__classcell__", "__doc__", "__module__", "__orig_bases__", "__qualname__", }
def is_valid_private_name(name: str) -> bool: return not is_valid_field(name) and name not in { "__annotations__", "__classcell__", "__doc__", "__module__", "__qualname__", }
https://github.com/samuelcolvin/pydantic/issues/2138
TypeError Traceback (most recent call last) <ipython-input-17-86d3af5f0365> in <module> ----> 1 class Model(GenericModel, Generic[T]): 2 class Config: 3 underscore_attrs_are_private = True 4 id: T 5 /nix/store/5mlyrz5jm75dbjd92wsq89b9lsd0bhww-python3-3.7.7-env/lib/python3.7/site-packages/pydantic/main.py in __new__(mcs, name, bases, namespace, **kwargs) 322 } 323 --> 324 cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) 325 # set __signature__ attr only for model class, but not for its instances 326 cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config)) /nix/store/k2w1idz2vdag50xl88113845mr74z823-python3-3.7.7/lib/python3.7/abc.py in __new__(mcls, name, bases, namespace, **kwargs) 124 """ 125 def __new__(mcls, name, bases, namespace, **kwargs): --> 126 cls = super().__new__(mcls, name, bases, namespace, **kwargs) 127 _abc_init(cls) 128 return cls /nix/store/k2w1idz2vdag50xl88113845mr74z823-python3-3.7.7/lib/python3.7/typing.py in __init_subclass__(cls, *args, **kwargs) 848 tvars = [] 849 if '__orig_bases__' in cls.__dict__: --> 850 error = Generic in cls.__orig_bases__ 851 else: 852 error = Generic in cls.__bases__ and cls.__name__ != '_Protocol' TypeError: argument of type 'member_descriptor' is not iterable
TypeError
def make_dataclass_validator( _cls: Type[Any], config: Type["BaseConfig"] ) -> "CallableGenerator": """ Create a pydantic.dataclass from a builtin dataclass to add type validation and yield the validators It retrieves the parameters of the dataclass and forwards them to the newly created dataclass """ dataclass_params = _cls.__dataclass_params__ stdlib_dataclass_parameters = { param: getattr(dataclass_params, param) for param in dataclass_params.__slots__ } cls = dataclass(_cls, config=config, **stdlib_dataclass_parameters) yield from _get_validators(cls)
def make_dataclass_validator( _cls: Type[Any], config: Type["BaseConfig"] ) -> "CallableGenerator": """ Create a pydantic.dataclass from a builtin dataclass to add type validation and yield the validators """ cls = dataclass(_cls, config=config) yield from _get_validators(cls)
https://github.com/samuelcolvin/pydantic/issues/2065
Traceback (most recent call last): File "/home/user/debug_pydantic.py", line 11, in <module> class Example(BaseModel): File "pydantic/main.py", line 262, in pydantic.main.ModelMetaclass.__new__ File "pydantic/fields.py", line 315, in pydantic.fields.ModelField.infer File "pydantic/fields.py", line 284, in pydantic.fields.ModelField.__init__ File "pydantic/fields.py", line 362, in pydantic.fields.ModelField.prepare File "pydantic/fields.py", line 538, in pydantic.fields.ModelField.populate_validators File "pydantic/validators.py", line 596, in find_validators File "pydantic/dataclasses.py", line 222, in make_dataclass_validator # and only from the field() function, although Field instances are File "pydantic/dataclasses.py", line 214, in pydantic.dataclasses.dataclass type_name = self.type.__name__ File "pydantic/dataclasses.py", line 209, in pydantic.dataclasses.dataclass.wrap def __init__(self, type): File "pydantic/dataclasses.py", line 126, in pydantic.dataclasses._process_class # +=======+=======+=======+========+========+ File "/home/user/miniconda3/envs/debug_pydantic/lib/python3.8/dataclasses.py", line 1019, in dataclass return wrap(cls) File "/home/user/miniconda3/envs/debug_pydantic/lib/python3.8/dataclasses.py", line 1011, in wrap return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen) File "/home/user/miniconda3/envs/debug_pydantic/lib/python3.8/dataclasses.py", line 891, in _process_class raise TypeError('cannot inherit non-frozen dataclass from a ' TypeError: cannot inherit non-frozen dataclass from a frozen one
TypeError
def _process_class( _cls: Type[Any], init: bool, repr: bool, eq: bool, order: bool, unsafe_hash: bool, frozen: bool, config: Optional[Type[Any]], ) -> Type["Dataclass"]: import dataclasses post_init_original = getattr(_cls, "__post_init__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None if not post_init_original: post_init_original = getattr(_cls, "__post_init_original__", None) post_init_post_parse = getattr(_cls, "__post_init_post_parse__", None) def _pydantic_post_init(self: "Dataclass", *initvars: Any) -> None: if post_init_original is not None: post_init_original(self, *initvars) d, _, validation_error = validate_model( self.__pydantic_model__, self.__dict__, cls=self.__class__ ) if validation_error: raise validation_error object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if post_init_post_parse is not None: post_init_post_parse(self, *initvars) # If the class is already a dataclass, __post_init__ will not be called automatically # so no validation will be added. 
# We hence create dynamically a new dataclass: # ``` # @dataclasses.dataclass # class NewClass(_cls): # __post_init__ = _pydantic_post_init # ``` # with the exact same fields as the base dataclass if is_builtin_dataclass(_cls): _cls = type( _cls.__name__, (_cls,), { "__annotations__": _cls.__annotations__, "__post_init__": _pydantic_post_init, }, ) else: _cls.__post_init__ = _pydantic_post_init cls: Type["Dataclass"] = dataclasses.dataclass( # type: ignore _cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen, ) cls.__processed__ = ClassAttribute("__processed__", True) fields: Dict[str, Any] = {} for field in dataclasses.fields(cls): if field.default != dataclasses.MISSING: field_value = field.default # mypy issue 7020 and 708 elif field.default_factory != dataclasses.MISSING: # type: ignore field_value = field.default_factory() # type: ignore else: field_value = Required fields[field.name] = (field.type, field_value) validators = gather_all_validators(cls) cls.__pydantic_model__ = create_model( cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields, ) cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment] cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment] if post_init_original: cls.__post_init_original__ = post_init_original if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment] return cls
def _process_class( _cls: Type[Any], init: bool, repr: bool, eq: bool, order: bool, unsafe_hash: bool, frozen: bool, config: Optional[Type[Any]], ) -> Type["Dataclass"]: import dataclasses post_init_original = getattr(_cls, "__post_init__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None if not post_init_original: post_init_original = getattr(_cls, "__post_init_original__", None) post_init_post_parse = getattr(_cls, "__post_init_post_parse__", None) def _pydantic_post_init(self: "Dataclass", *initvars: Any) -> None: if post_init_original is not None: post_init_original(self, *initvars) d, _, validation_error = validate_model( self.__pydantic_model__, self.__dict__, cls=self.__class__ ) if validation_error: raise validation_error object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if post_init_post_parse is not None: post_init_post_parse(self, *initvars) # If the class is already a dataclass, __post_init__ will not be called automatically # so no validation will be added. 
# We hence create dynamically a new dataclass: # ``` # @dataclasses.dataclass # class NewClass(_cls): # __post_init__ = _pydantic_post_init # ``` # with the exact same fields as the base dataclass if is_builtin_dataclass(_cls): _cls = type(_cls.__name__, (_cls,), {"__post_init__": _pydantic_post_init}) else: _cls.__post_init__ = _pydantic_post_init cls: Type["Dataclass"] = dataclasses.dataclass( # type: ignore _cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen, ) cls.__processed__ = ClassAttribute("__processed__", True) fields: Dict[str, Any] = {} for field in dataclasses.fields(cls): if field.default != dataclasses.MISSING: field_value = field.default # mypy issue 7020 and 708 elif field.default_factory != dataclasses.MISSING: # type: ignore field_value = field.default_factory() # type: ignore else: field_value = Required fields[field.name] = (field.type, field_value) validators = gather_all_validators(cls) cls.__pydantic_model__ = create_model( cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields, ) cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment] cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment] if post_init_original: cls.__post_init_original__ = post_init_original if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment] return cls
https://github.com/samuelcolvin/pydantic/issues/2042
Traceback (most recent call last): File "dataclass.py", line 15, in <module> @dataclass File "pydantic/dataclasses.py", line 214, in pydantic.dataclasses.dataclass # name and type are filled in after the fact, not in __init__. File "pydantic/dataclasses.py", line 209, in pydantic.dataclasses.dataclass.wrap File "pydantic/dataclasses.py", line 138, in pydantic.dataclasses._process_class # +-------+-------+-------+--------+--------+ File "dataclass.py", line 17, in <lambda> a: A = field(default_factory=lambda: A(x=2, y=3)) TypeError: __init__() got an unexpected keyword argument 'x'
TypeError
def make_dataclass_validator( _cls: Type[Any], config: Type["BaseConfig"] ) -> "CallableGenerator": """ Create a pydantic.dataclass from a builtin dataclass to add type validation and yield the validators """ cls = dataclass(_cls, config=config) yield from _get_validators(cls)
def make_dataclass_validator(_cls: Type[Any], **kwargs: Any) -> "CallableGenerator": """ Create a pydantic.dataclass from a builtin dataclass to add type validation and yield the validators """ cls = dataclass(_cls, **kwargs) yield from _get_validators(cls)
https://github.com/samuelcolvin/pydantic/issues/2042
Traceback (most recent call last): File "dataclass.py", line 15, in <module> @dataclass File "pydantic/dataclasses.py", line 214, in pydantic.dataclasses.dataclass # name and type are filled in after the fact, not in __init__. File "pydantic/dataclasses.py", line 209, in pydantic.dataclasses.dataclass.wrap File "pydantic/dataclasses.py", line 138, in pydantic.dataclasses._process_class # +-------+-------+-------+--------+--------+ File "dataclass.py", line 17, in <lambda> a: A = field(default_factory=lambda: A(x=2, y=3)) TypeError: __init__() got an unexpected keyword argument 'x'
TypeError
def find_validators( # noqa: C901 (ignore complexity) type_: Type[Any], config: Type["BaseConfig"] ) -> Generator[AnyCallable, None, None]: from .dataclasses import is_builtin_dataclass, make_dataclass_validator if type_ is Any: return type_type = type_.__class__ if type_type == ForwardRef or type_type == TypeVar: return if type_ is Pattern: yield pattern_validator return if type_ is Hashable: yield hashable_validator return if is_callable_type(type_): yield callable_validator return if is_literal_type(type_): yield make_literal_validator(type_) return if is_builtin_dataclass(type_): yield from make_dataclass_validator(type_, config) return if type_ is Enum: yield enum_validator return if type_ is IntEnum: yield int_enum_validator return class_ = get_class(type_) if class_ is not None: if isinstance(class_, type): yield make_class_validator(class_) else: yield any_class_validator return for val_type, validators in _VALIDATORS: try: if issubclass(type_, val_type): for v in validators: if isinstance(v, IfConfig): if v.check(config): yield v.validator else: yield v return except TypeError: raise RuntimeError( f"error checking inheritance of {type_!r} (type: {display_as_type(type_)})" ) if config.arbitrary_types_allowed: yield make_arbitrary_type_validator(type_) else: raise RuntimeError( f"no validator found for {type_}, see `arbitrary_types_allowed` in Config" )
def find_validators( # noqa: C901 (ignore complexity) type_: Type[Any], config: Type["BaseConfig"] ) -> Generator[AnyCallable, None, None]: from .dataclasses import is_builtin_dataclass, make_dataclass_validator if type_ is Any: return type_type = type_.__class__ if type_type == ForwardRef or type_type == TypeVar: return if type_ is Pattern: yield pattern_validator return if type_ is Hashable: yield hashable_validator return if is_callable_type(type_): yield callable_validator return if is_literal_type(type_): yield make_literal_validator(type_) return if is_builtin_dataclass(type_): yield from make_dataclass_validator(type_) return if type_ is Enum: yield enum_validator return if type_ is IntEnum: yield int_enum_validator return class_ = get_class(type_) if class_ is not None: if isinstance(class_, type): yield make_class_validator(class_) else: yield any_class_validator return for val_type, validators in _VALIDATORS: try: if issubclass(type_, val_type): for v in validators: if isinstance(v, IfConfig): if v.check(config): yield v.validator else: yield v return except TypeError: raise RuntimeError( f"error checking inheritance of {type_!r} (type: {display_as_type(type_)})" ) if config.arbitrary_types_allowed: yield make_arbitrary_type_validator(type_) else: raise RuntimeError( f"no validator found for {type_}, see `arbitrary_types_allowed` in Config" )
https://github.com/samuelcolvin/pydantic/issues/2042
Traceback (most recent call last): File "dataclass.py", line 15, in <module> @dataclass File "pydantic/dataclasses.py", line 214, in pydantic.dataclasses.dataclass # name and type are filled in after the fact, not in __init__. File "pydantic/dataclasses.py", line 209, in pydantic.dataclasses.dataclass.wrap File "pydantic/dataclasses.py", line 138, in pydantic.dataclasses._process_class # +-------+-------+-------+--------+--------+ File "dataclass.py", line 17, in <lambda> a: A = field(default_factory=lambda: A(x=2, y=3)) TypeError: __init__() got an unexpected keyword argument 'x'
TypeError
def is_valid_private_name(name: str) -> bool: return not is_valid_field(name) and name not in { "__annotations__", "__classcell__", "__module__", "__qualname__", }
def is_valid_private_name(name: str) -> bool: return not is_valid_field(name) and name not in { "__annotations__", "__module__", "__annotations__", "__qualname__", }
https://github.com/samuelcolvin/pydantic/issues/2047
test.py:4: DeprecationWarning: __class__ not set defining 'TestObject' as <class '__main__.TestObject'>. Was __classcell__ propagated to type.__new__? class TestObject(BaseModel): Traceback (most recent call last): File "test.py", line 15, in <module> print(TestObject(public_field="foo")) File "test.py", line 12, in __init__ super().__init__(**data) File "pydantic/main.py", line 365, in pydantic.main.BaseModel.__init__ File "pydantic/main.py", line 424, in pydantic.main.BaseModel._init_private_attributes File "pydantic/fields.py", line 821, in pydantic.fields.PrivateAttr.get_default File "pydantic/utils.py", line 624, in pydantic.utils.smart_deepcopy File "/Users/ahedges/.pyenv/versions/3.7.8/lib/python3.7/copy.py", line 169, in deepcopy rv = reductor(4) TypeError: can't pickle cell objects
TypeError
def add_field_type_to_schema(field_type: Any, schema: Dict[str, Any]) -> None: """ Update the given `schema` with the type-specific metadata for the given `field_type`. This function looks through `field_class_to_schema` for a class that matches the given `field_type`, and then modifies the given `schema` with the information from that type. """ for type_, t_schema in field_class_to_schema: # Fallback for `typing.Pattern` as it is not a valid class if lenient_issubclass(field_type, type_) or field_type is type_ is Pattern: schema.update(t_schema) break
def add_field_type_to_schema(field_type: Any, schema: Dict[str, Any]) -> None: """ Update the given `schema` with the type-specific metadata for the given `field_type`. This function looks through `field_class_to_schema` for a class that matches the given `field_type`, and then modifies the given `schema` with the information from that type. """ for type_, t_schema in field_class_to_schema: if issubclass(field_type, type_): schema.update(t_schema) break
https://github.com/samuelcolvin/pydantic/issues/1767
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-13-c3756917c641> in <module> ----> 1 model_process_schema(Test, model_name_map={}) ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/schema.py in model_process_schema(model, by_alias, model_name_map, ref_prefix, known_models) 454 known_models.add(model) 455 m_schema, m_definitions, nested_models = model_type_schema( --> 456 model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, known_models=known_models 457 ) 458 s.update(m_schema) ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/schema.py in model_type_schema(model, by_alias, model_name_map, ref_prefix, known_models) 490 try: 491 f_schema, f_definitions, f_nested_models = field_schema( --> 492 f, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, known_models=known_models 493 ) 494 except SkipField as skip: ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/schema.py in field_schema(field, by_alias, model_name_map, ref_prefix, known_models) 189 schema_overrides=schema_overrides, 190 ref_prefix=ref_prefix, --> 191 known_models=known_models or set(), 192 ) 193 # $ref will only be returned when there are no schema_overrides ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/schema.py in field_type_schema(field, by_alias, model_name_map, schema_overrides, ref_prefix, known_models) 416 schema_overrides=schema_overrides, 417 ref_prefix=ref_prefix, --> 418 known_models=known_models, 419 ) 420 definitions.update(f_definitions) ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/schema.py in field_singleton_schema(field, by_alias, model_name_map, schema_overrides, ref_prefix, known_models) 642 f_schema['const'] = literal_value 643 --> 644 if issubclass(field_type, Enum): 645 
f_schema.update({'enum': [item.value for item in field_type]}) 646 # Don't return immediately, to allow adding specific types TypeError: issubclass() arg 1 must be a class
TypeError
def prepare_field(cls, field: ModelField) -> None: env_names: Union[List[str], AbstractSet[str]] field_info_from_config = cls.get_field_info(field.name) env = field_info_from_config.get("env") or field.field_info.extra.get("env") if env is None: if field.has_alias: warnings.warn( "aliases are no longer used by BaseSettings to define which environment variables to read. " 'Instead use the "env" field setting. ' "See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names", FutureWarning, ) env_names = {cls.env_prefix + field.name} elif isinstance(env, str): env_names = {env} elif isinstance(env, (set, frozenset)): env_names = env elif sequence_like(env): env_names = list(env) else: raise TypeError( f"invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set" ) if not cls.case_sensitive: env_names = env_names.__class__(n.lower() for n in env_names) field.field_info.extra["env_names"] = env_names
def prepare_field(cls, field: ModelField) -> None: env_names: Union[List[str], AbstractSet[str]] env = field.field_info.extra.get("env") if env is None: if field.has_alias: warnings.warn( "aliases are no longer used by BaseSettings to define which environment variables to read. " 'Instead use the "env" field setting. ' "See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names", FutureWarning, ) env_names = {cls.env_prefix + field.name} elif isinstance(env, str): env_names = {env} elif isinstance(env, (set, frozenset)): env_names = env elif sequence_like(env): env_names = list(env) else: raise TypeError( f"invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set" ) if not cls.case_sensitive: env_names = env_names.__class__(n.lower() for n in env_names) field.field_info.extra["env_names"] = env_names
https://github.com/samuelcolvin/pydantic/issues/1561
In [2]: from pydantic import AnyUrl, BaseSettings In [4]: %set_env FOO=7 env: FOO=7 In [9]: class A(BaseSettings): ...: foo : int ...: class Config: ...: env_prefix = 'PREFIX_' ...: fields = { ...: 'foo': {'env': ['FOO', 'PREFIX_FOO'],}, ...: } ...: ...: In [10]: A().foo Out[10]: 7 In [11]: A.__fields__['foo'].field_info Out[11]: FieldInfo(default=Ellipsis, extra={'env': ['FOO', 'PREFIX_FOO'], 'env_names': ['foo', 'prefix_foo']}) In [12]: class A(BaseSettings): ...: foo : int ...: In [13]: class B(A): ...: class Config: ...: env_prefix = 'PREFIX_' ...: fields = { ...: 'foo': {'env': ['FOO', 'PREFIX_FOO'],}, ...: } ...: ...: ...: In [14]: B().foo --------------------------------------------------------------------------- ValidationError Traceback (most recent call last) <ipython-input-14-40329edc0228> in <module> ----> 1 B().foo ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/env_settings.cpython-36m-x86_64-linux-gnu.so in pydantic.env_settings.BaseSettings.__init__() ~/.cache/pypoetry/virtualenvs/chatbot-py3.6/lib/python3.6/site-packages/pydantic/main.cpython-36m-x86_64-linux-gnu.so in pydantic.main.BaseModel.__init__() ValidationError: 1 validation error for B foo field required (type=value_error.missing) In [15]: A.__fields__['foo'] Out[15]: ModelField(name='foo', type=int, required=True) In [16]: A.__fields__['foo'].field_info Out[16]: FieldInfo(default=Ellipsis, extra={'env_names': {'foo'}}) In [17]: B.__fields__['foo'].field_info Out[17]: FieldInfo(default=Ellipsis, extra={'env_names': {'prefix_foo'}})
ValidationError
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 fields: Dict[str, ModelField] = {} config = BaseConfig validators: "ValidatorListDict" = {} fields_defaults: Dict[str, Any] = {} pre_root_validators, post_root_validators = [], [] for base in reversed(bases): if ( _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel ): fields.update(deepcopy(base.__fields__)) config = inherit_config(base.__config__, config) validators = inherit_validators(base.__validators__, validators) pre_root_validators += base.__pre_root_validators__ post_root_validators += base.__post_root_validators__ config = inherit_config(namespace.get("Config"), config) validators = inherit_validators(extract_validators(namespace), validators) vg = ValidatorGroup(validators) for f in fields.values(): if not f.required: fields_defaults[f.name] = f.default f.set_config(config) extra_validators = vg.get_validators(f.name) if extra_validators: f.class_validators.update(extra_validators) # re-run prepare to add extra validators f.populate_validators() prepare_config(config, name) class_vars = set() if (namespace.get("__module__"), namespace.get("__qualname__")) != ( "pydantic.main", "BaseModel", ): annotations = resolve_annotations( namespace.get("__annotations__", {}), namespace.get("__module__", None) ) untouched_types = UNTOUCHED_TYPES + config.keep_untouched # annotation only fields need to come first in fields for ann_name, ann_type in annotations.items(): if is_classvar(ann_type): class_vars.add(ann_name) elif is_valid_field(ann_name): validate_field_name(bases, ann_name) value = namespace.get(ann_name, Undefined) if ( isinstance(value, untouched_types) and ann_type != PyObject and not lenient_issubclass( getattr(ann_type, "__origin__", None), Type ) ): continue fields[ann_name] = inferred = ModelField.infer( name=ann_name, value=value, annotation=ann_type, class_validators=vg.get_validators(ann_name), config=config, ) if not inferred.required: 
fields_defaults[ann_name] = inferred.default for var_name, value in namespace.items(): if ( var_name not in annotations and is_valid_field(var_name) and not isinstance(value, untouched_types) and var_name not in class_vars ): validate_field_name(bases, var_name) inferred = ModelField.infer( name=var_name, value=value, annotation=annotations.get(var_name), class_validators=vg.get_validators(var_name), config=config, ) if var_name in fields and inferred.type_ != fields[var_name].type_: raise TypeError( f"The type of {name}.{var_name} differs from the new default value; " f"if you wish to change the type of this field, please use a type annotation" ) fields[var_name] = inferred if not inferred.required: fields_defaults[var_name] = inferred.default _custom_root_type = ROOT_KEY in fields if _custom_root_type: validate_custom_root_type(fields) vg.check_for_unused() if config.json_encoders: json_encoder = partial(custom_pydantic_encoder, config.json_encoders) else: json_encoder = pydantic_encoder pre_rv_new, post_rv_new = extract_root_validators(namespace) new_namespace = { "__config__": config, "__fields__": fields, "__field_defaults__": fields_defaults, "__validators__": vg.validators, "__pre_root_validators__": pre_root_validators + pre_rv_new, "__post_root_validators__": post_root_validators + post_rv_new, "__schema_cache__": {}, "__json_encoder__": staticmethod(json_encoder), "__custom_root_type__": _custom_root_type, **{n: v for n, v in namespace.items() if n not in fields}, } cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) # set __signature__ attr only for model class, but not for its instances cls.__signature__ = ClassAttribute( "__signature__", generate_model_signature(cls.__init__, fields, config) ) return cls
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 fields: Dict[str, ModelField] = {} config = BaseConfig validators: "ValidatorListDict" = {} fields_defaults: Dict[str, Any] = {} pre_root_validators, post_root_validators = [], [] for base in reversed(bases): if ( _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel ): fields.update(deepcopy(base.__fields__)) config = inherit_config(base.__config__, config) validators = inherit_validators(base.__validators__, validators) pre_root_validators += base.__pre_root_validators__ post_root_validators += base.__post_root_validators__ config = inherit_config(namespace.get("Config"), config) validators = inherit_validators(extract_validators(namespace), validators) vg = ValidatorGroup(validators) for f in fields.values(): if not f.required: fields_defaults[f.name] = f.default f.set_config(config) extra_validators = vg.get_validators(f.name) if extra_validators: f.class_validators.update(extra_validators) # re-run prepare to add extra validators f.populate_validators() prepare_config(config, name) class_vars = set() if (namespace.get("__module__"), namespace.get("__qualname__")) != ( "pydantic.main", "BaseModel", ): annotations = resolve_annotations( namespace.get("__annotations__", {}), namespace.get("__module__", None) ) untouched_types = UNTOUCHED_TYPES + config.keep_untouched # annotation only fields need to come first in fields for ann_name, ann_type in annotations.items(): if is_classvar(ann_type): class_vars.add(ann_name) elif is_valid_field(ann_name): validate_field_name(bases, ann_name) value = namespace.get(ann_name, Undefined) if ( isinstance(value, untouched_types) and ann_type != PyObject and not lenient_issubclass( getattr(ann_type, "__origin__", None), Type ) ): continue fields[ann_name] = inferred = ModelField.infer( name=ann_name, value=value, annotation=ann_type, class_validators=vg.get_validators(ann_name), config=config, ) if not inferred.required: 
fields_defaults[ann_name] = inferred.default for var_name, value in namespace.items(): if ( var_name not in annotations and is_valid_field(var_name) and not isinstance(value, untouched_types) and var_name not in class_vars ): validate_field_name(bases, var_name) inferred = ModelField.infer( name=var_name, value=value, annotation=annotations.get(var_name), class_validators=vg.get_validators(var_name), config=config, ) if var_name in fields and inferred.type_ != fields[var_name].type_: raise TypeError( f"The type of {name}.{var_name} differs from the new default value; " f"if you wish to change the type of this field, please use a type annotation" ) fields[var_name] = inferred if not inferred.required: fields_defaults[var_name] = inferred.default _custom_root_type = ROOT_KEY in fields if _custom_root_type: validate_custom_root_type(fields) vg.check_for_unused() if config.json_encoders: json_encoder = partial(custom_pydantic_encoder, config.json_encoders) else: json_encoder = pydantic_encoder pre_rv_new, post_rv_new = extract_root_validators(namespace) new_namespace = { "__config__": config, "__fields__": fields, "__field_defaults__": fields_defaults, "__validators__": vg.validators, "__pre_root_validators__": pre_root_validators + pre_rv_new, "__post_root_validators__": post_root_validators + post_rv_new, "__schema_cache__": {}, "__json_encoder__": staticmethod(json_encoder), "__custom_root_type__": _custom_root_type, **{n: v for n, v in namespace.items() if n not in fields}, } cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) cls.__signature__ = generate_model_signature(cls.__init__, fields, config) return cls
https://github.com/samuelcolvin/pydantic/issues/1419
FullArgSpec(args=['self'], varargs=None, varkw=None, defaults=None, kwonlyargs=['foo'], kwonlydefaults=None, annotations={'foo': <class 'str'>}) Traceback (most recent call last): File "/usr/lib/python3.8/inspect.py", line 1123, in getfullargspec sig = _signature_from_callable(func, File "/usr/lib/python3.8/inspect.py", line 2216, in _signature_from_callable raise TypeError('{!r} is not a callable object'.format(obj)) TypeError: <__main__.MyModel object at 0x7fb1f405b490> is not a callable object The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/jrootjunior/work/aiogram3/expetiment.py", line 19, in <module> print(inspect.getfullargspec(MyModel(foo="baz"))) File "/usr/lib/python3.8/inspect.py", line 1132, in getfullargspec raise TypeError('unsupported callable') from ex TypeError: unsupported callable
TypeError
def __init__(self, name: str, value: Any) -> None: self.name = name self.value = value
def __init__( self, value: Any, items: Union["AbstractSetIntStr", "MappingIntStrAny"] ) -> None: if TYPE_CHECKING: self._items: Union["AbstractSetIntStr", "MappingIntStrAny"] self._type: Type[Union[set, dict]] # type: ignore # For further type checks speed-up if isinstance(items, dict): self._type = dict elif isinstance(items, AbstractSet): self._type = set else: raise TypeError(f"Unexpected type of exclude value {items.__class__}") if isinstance(value, (list, tuple)): try: items = self._normalize_indexes(items, len(value)) except TypeError as e: raise TypeError( "Excluding fields from a sequence of sub-models or dicts must be performed index-wise: " 'expected integer keys or keyword "__all__"' ) from e self._items = items
https://github.com/samuelcolvin/pydantic/issues/1419
FullArgSpec(args=['self'], varargs=None, varkw=None, defaults=None, kwonlyargs=['foo'], kwonlydefaults=None, annotations={'foo': <class 'str'>}) Traceback (most recent call last): File "/usr/lib/python3.8/inspect.py", line 1123, in getfullargspec sig = _signature_from_callable(func, File "/usr/lib/python3.8/inspect.py", line 2216, in _signature_from_callable raise TypeError('{!r} is not a callable object'.format(obj)) TypeError: <__main__.MyModel object at 0x7fb1f405b490> is not a callable object The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/jrootjunior/work/aiogram3/expetiment.py", line 19, in <module> print(inspect.getfullargspec(MyModel(foo="baz"))) File "/usr/lib/python3.8/inspect.py", line 1132, in getfullargspec raise TypeError('unsupported callable') from ex TypeError: unsupported callable
TypeError
def populate_validators(self) -> None: """ Prepare self.pre_validators, self.validators, and self.post_validators based on self.type_'s __get_validators__ and class validators. This method should be idempotent, e.g. it should be safe to call multiple times without mis-configuring the field. """ class_validators_ = self.class_validators.values() if not self.sub_fields or self.shape == SHAPE_GENERIC: get_validators = getattr(self.type_, "__get_validators__", None) v_funcs = ( *[v.func for v in class_validators_ if v.each_item and v.pre], *( get_validators() if get_validators else list(find_validators(self.type_, self.model_config)) ), *[v.func for v in class_validators_ if v.each_item and not v.pre], ) self.validators = prep_validators(v_funcs) self.pre_validators = [] self.post_validators = [] if self.field_info and self.field_info.const: self.post_validators.append(make_generic_validator(constant_validator)) if class_validators_: self.pre_validators += prep_validators( v.func for v in class_validators_ if not v.each_item and v.pre ) self.post_validators = prep_validators( v.func for v in class_validators_ if not v.each_item and not v.pre ) if self.parse_json: self.pre_validators.append(make_generic_validator(validate_json)) self.pre_validators = self.pre_validators or None self.post_validators = self.post_validators or None
def populate_validators(self) -> None: """ Prepare self.pre_validators, self.validators, and self.post_validators based on self.type_'s __get_validators__ and class validators. This method should be idempotent, e.g. it should be safe to call multiple times without mis-configuring the field. """ class_validators_ = self.class_validators.values() if not self.sub_fields or self.shape == SHAPE_GENERIC: get_validators = getattr(self.type_, "__get_validators__", None) v_funcs = ( *[v.func for v in class_validators_ if v.each_item and v.pre], *( get_validators() if get_validators else list(find_validators(self.type_, self.model_config)) ), *[v.func for v in class_validators_ if v.each_item and not v.pre], ) self.validators = prep_validators(v_funcs) # Add const validator self.pre_validators = [] self.post_validators = [] if self.field_info and self.field_info.const: self.pre_validators = [make_generic_validator(constant_validator)] if class_validators_: self.pre_validators += prep_validators( v.func for v in class_validators_ if not v.each_item and v.pre ) self.post_validators = prep_validators( v.func for v in class_validators_ if not v.each_item and not v.pre ) if self.parse_json: self.pre_validators.append(make_generic_validator(validate_json)) self.pre_validators = self.pre_validators or None self.post_validators = self.post_validators or None
https://github.com/samuelcolvin/pydantic/issues/1410
import os from pathlib import Path import pydantic class Config1(pydantic.BaseSettings): ... port: int = pydantic.Field(1234, const=True) ... class Config2(pydantic.BaseSettings): ... port: int = pydantic.Field("1234", const=True) ... print("No env") No env print("Config1", Config1().dict()) Config1 {'port': 1234} print("Config2", Config2().dict()) Config2 {'port': 1234} print("With .env") With .env Path(".env").write_text("port=1234\n") 10 print("Config1", Config1(_env_file=".env").dict()) # FAILS Traceback (most recent call last): File "<stdin>", line 1, in <module> File "D:\dev\github\pydantic\issues\.venv\lib\site-packages\pydantic\env_settings.py", line 28, in __init__ super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file)) File "D:\dev\github\pydantic\issues\.venv\lib\site-packages\pydantic\main.py", line 338, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for Config1 port unexpected value; permitted: 1234 (type=value_error.const; given=1234; permitted=[1234]) print("Config2", Config2(_env_file=".env").dict()) Config2 {'port': 1234} print("With os.environ") With os.environ os.environ["port"] = "1234" print("Config1", Config1().dict()) # FAILS Traceback (most recent call last): File "<stdin>", line 1, in <module> File "D:\dev\github\pydantic\issues\.venv\lib\site-packages\pydantic\env_settings.py", line 28, in __init__ super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file)) File "D:\dev\github\pydantic\issues\.venv\lib\site-packages\pydantic\main.py", line 338, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for Config1 port unexpected value; permitted: 1234 (type=value_error.const; given=1234; permitted=[1234]) print("Config2", Config2().dict()) Config2 {'port': 1234}
pydantic.error_wrappers.ValidationError
def generate_model_signature( init: Callable[..., None], fields: Dict[str, "ModelField"], config: Type["BaseConfig"], ) -> "Signature": """ Generate signature for model based on its fields """ from inspect import Parameter, Signature, signature present_params = signature(init).parameters.values() merged_params: Dict[str, Parameter] = {} var_kw = None use_var_kw = False for param in islice(present_params, 1, None): # skip self arg if param.kind is param.VAR_KEYWORD: var_kw = param continue merged_params[param.name] = param if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through allow_names = config.allow_population_by_field_name for field_name, field in fields.items(): param_name = field.alias if field_name in merged_params or param_name in merged_params: continue elif not param_name.isidentifier(): if allow_names and field_name.isidentifier(): param_name = field_name else: use_var_kw = True continue # TODO: replace annotation with actual expected types once #1055 solved kwargs = {"default": field.default} if not field.required else {} merged_params[param_name] = Parameter( param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs, ) if config.extra is config.extra.allow: use_var_kw = True if var_kw and use_var_kw: # Make sure the parameter for extra kwargs # does not have the same name as a field default_model_signature = [ ("__pydantic_self__", Parameter.POSITIONAL_OR_KEYWORD), ("data", Parameter.VAR_KEYWORD), ] if [(p.name, p.kind) for p in present_params] == default_model_signature: # if this is the standard model signature, use extra_data as the extra args name var_kw_name = "extra_data" else: # else start from var_kw var_kw_name = var_kw.name # generate a name that's definitely unique while var_kw_name in fields: var_kw_name += "_" merged_params[var_kw_name] = var_kw.replace(name=var_kw_name) return Signature(parameters=list(merged_params.values()), return_annotation=None)
def generate_model_signature( init: Callable[..., None], fields: Dict[str, "ModelField"], config: Type["BaseConfig"], ) -> "Signature": """ Generate signature for model based on its fields """ from inspect import Parameter, Signature, signature present_params = signature(init).parameters.values() merged_params: Dict[str, Parameter] = {} var_kw = None use_var_kw = False for param in islice(present_params, 1, None): # skip self arg if param.kind is param.VAR_KEYWORD: var_kw = param continue merged_params[param.name] = param if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through allow_names = config.allow_population_by_field_name for field_name, field in fields.items(): param_name = field.alias if field_name in merged_params or param_name in merged_params: continue elif not param_name.isidentifier(): if allow_names and field_name.isidentifier(): param_name = field_name else: use_var_kw = True continue # TODO: replace annotation with actual expected types once #1055 solved kwargs = {"default": field.default} if not field.required else {} merged_params[param_name] = Parameter( param_name, Parameter.KEYWORD_ONLY, annotation=field.type_, **kwargs ) if config.extra is config.extra.allow: use_var_kw = True if var_kw and use_var_kw: merged_params[var_kw.name] = var_kw return Signature(parameters=list(merged_params.values()), return_annotation=None)
https://github.com/samuelcolvin/pydantic/issues/1418
Traceback (most recent call last): File "/home/jrootjunior/work/aiogram3/expetiment.py", line 7, in <module> class MyObject(BaseModel): File "/home/jrootjunior/.cache/pypoetry/virtualenvs/aiogram-BAWpo_Vh-py3.8/lib/python3.8/site-packages/pydantic/main.py", line 303, in __new__ cls.__signature__ = generate_model_signature(cls.__init__, fields, config) File "/home/jrootjunior/.cache/pypoetry/virtualenvs/aiogram-BAWpo_Vh-py3.8/lib/python3.8/site-packages/pydantic/utils.py", line 185, in generate_model_signature return Signature(parameters=list(merged_params.values()), return_annotation=None) File "/usr/lib/python3.8/inspect.py", line 2785, in __init__ raise ValueError(msg) ValueError: wrong parameter order: variadic keyword parameter before keyword-only parameter
ValueError
def __class_getitem__( cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]] ) -> Type[Any]: cached = _generic_types_cache.get((cls, params)) if cached is not None: return cached if cls.__concrete__: raise TypeError( "Cannot parameterize a concrete instantiation of a generic model" ) if not isinstance(params, tuple): params = (params,) if cls is GenericModel and any(isinstance(param, TypeVar) for param in params): # type: ignore raise TypeError( "Type parameters should be placed on typing.Generic, not GenericModel" ) if not hasattr(cls, "__parameters__"): raise TypeError( f"Type {cls.__name__} must inherit from typing.Generic before being parameterized" ) check_parameters_count(cls, params) typevars_map: Dict[TypeVarType, Type[Any]] = dict(zip(cls.__parameters__, params)) type_hints = get_type_hints(cls).items() instance_type_hints = { k: v for k, v in type_hints if getattr(v, "__origin__", None) is not ClassVar } concrete_type_hints: Dict[str, Type[Any]] = { k: resolve_type_hint(v, typevars_map) for k, v in instance_type_hints.items() } model_name = cls.__concrete_name__(params) validators = gather_all_validators(cls) fields = _build_generic_fields(cls.__fields__, concrete_type_hints, typevars_map) created_model = cast( Type[ GenericModel ], # casting ensures mypy is aware of the __concrete__ and __parameters__ attributes create_model( model_name, __module__=cls.__module__, __base__=cls, __config__=None, __validators__=validators, **fields, ), ) created_model.Config = cls.Config concrete = all(not _is_typevar(v) for v in concrete_type_hints.values()) created_model.__concrete__ = concrete if not concrete: parameters = tuple(v for v in concrete_type_hints.values() if _is_typevar(v)) parameters = tuple( {k: None for k in parameters}.keys() ) # get unique params while maintaining order created_model.__parameters__ = parameters _generic_types_cache[(cls, params)] = created_model if len(params) == 1: _generic_types_cache[(cls, params[0])] = 
created_model return created_model
def __class_getitem__( cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]] ) -> Type[Any]: cached = _generic_types_cache.get((cls, params)) if cached is not None: return cached if cls.__concrete__: raise TypeError( "Cannot parameterize a concrete instantiation of a generic model" ) if not isinstance(params, tuple): params = (params,) if cls is GenericModel and any(isinstance(param, TypeVar) for param in params): # type: ignore raise TypeError( "Type parameters should be placed on typing.Generic, not GenericModel" ) if not hasattr(cls, "__parameters__"): raise TypeError( f"Type {cls.__name__} must inherit from typing.Generic before being parameterized" ) check_parameters_count(cls, params) typevars_map: Dict[TypeVarType, Type[Any]] = dict(zip(cls.__parameters__, params)) type_hints = get_type_hints(cls).items() instance_type_hints = { k: v for k, v in type_hints if getattr(v, "__origin__", None) is not ClassVar } concrete_type_hints: Dict[str, Type[Any]] = { k: resolve_type_hint(v, typevars_map) for k, v in instance_type_hints.items() } model_name = cls.__concrete_name__(params) validators = gather_all_validators(cls) fields = _build_generic_fields(cls.__fields__, concrete_type_hints, typevars_map) created_model = cast( Type[ GenericModel ], # casting ensures mypy is aware of the __concrete__ and __parameters__ attributes create_model( model_name=model_name, __module__=cls.__module__, __base__=cls, __config__=None, __validators__=validators, **fields, ), ) created_model.Config = cls.Config concrete = all(not _is_typevar(v) for v in concrete_type_hints.values()) created_model.__concrete__ = concrete if not concrete: parameters = tuple(v for v in concrete_type_hints.values() if _is_typevar(v)) parameters = tuple( {k: None for k in parameters}.keys() ) # get unique params while maintaining order created_model.__parameters__ = parameters _generic_types_cache[(cls, params)] = created_model if len(params) == 1: _generic_types_cache[(cls, params[0])] 
= created_model return created_model
https://github.com/samuelcolvin/pydantic/issues/1366
Traceback (most recent call last): File "test_model_name.py", line 3, in <module> @dataclass File "pydantic/dataclasses.py", line 146, in pydantic.dataclasses.dataclass # File "pydantic/dataclasses.py", line 141, in pydantic.dataclasses.dataclass.wrap # | True | True | True | add | raise | Frozen, so hashable File "pydantic/dataclasses.py", line 106, in pydantic.dataclasses._process_class # +--- order= parameter File "pydantic/main.py", line 736, in pydantic.main.create_model TypeError: create_model() got multiple values for keyword argument 'model_name'
TypeError
def create_model( __model_name: str, *, __config__: Type[BaseConfig] = None, __base__: Type[BaseModel] = None, __module__: Optional[str] = None, __validators__: Dict[str, classmethod] = None, **field_definitions: Any, ) -> Type[BaseModel]: """ Dynamically create a model. :param __model_name: name of the created model :param __config__: config class to use for the new model :param __base__: base class for the new model to inherit from :param __validators__: a dict of method names and @validator class methods :param **field_definitions: fields of the model (or extra fields if a base is supplied) in the format `<name>=(<type>, <default default>)` or `<name>=<default value> eg. `foobar=(str, ...)` or `foobar=123` """ if __base__: if __config__ is not None: raise ConfigError( "to avoid confusion __config__ and __base__ cannot be used together" ) else: __base__ = BaseModel fields = {} annotations = {} for f_name, f_def in field_definitions.items(): if not is_valid_field(f_name): warnings.warn( f'fields may not start with an underscore, ignoring "{f_name}"', RuntimeWarning, ) if isinstance(f_def, tuple): try: f_annotation, f_value = f_def except ValueError as e: raise ConfigError( "field definitions should either be a tuple of (<type>, <default>) or just a " "default value, unfortunately this means tuples as " "default values are not allowed" ) from e else: f_annotation, f_value = None, f_def if f_annotation: annotations[f_name] = f_annotation fields[f_name] = f_value namespace: "DictStrAny" = {"__annotations__": annotations, "__module__": __module__} if __validators__: namespace.update(__validators__) namespace.update(fields) if __config__: namespace["Config"] = inherit_config(__config__, BaseConfig) return type(__model_name, (__base__,), namespace)
def create_model( model_name: str, *, __config__: Type[BaseConfig] = None, __base__: Type[BaseModel] = None, __module__: Optional[str] = None, __validators__: Dict[str, classmethod] = None, **field_definitions: Any, ) -> Type[BaseModel]: """ Dynamically create a model. :param model_name: name of the created model :param __config__: config class to use for the new model :param __base__: base class for the new model to inherit from :param __validators__: a dict of method names and @validator class methods :param **field_definitions: fields of the model (or extra fields if a base is supplied) in the format `<name>=(<type>, <default default>)` or `<name>=<default value> eg. `foobar=(str, ...)` or `foobar=123` """ if __base__: if __config__ is not None: raise ConfigError( "to avoid confusion __config__ and __base__ cannot be used together" ) else: __base__ = BaseModel fields = {} annotations = {} for f_name, f_def in field_definitions.items(): if not is_valid_field(f_name): warnings.warn( f'fields may not start with an underscore, ignoring "{f_name}"', RuntimeWarning, ) if isinstance(f_def, tuple): try: f_annotation, f_value = f_def except ValueError as e: raise ConfigError( "field definitions should either be a tuple of (<type>, <default>) or just a " "default value, unfortunately this means tuples as " "default values are not allowed" ) from e else: f_annotation, f_value = None, f_def if f_annotation: annotations[f_name] = f_annotation fields[f_name] = f_value namespace: "DictStrAny" = {"__annotations__": annotations, "__module__": __module__} if __validators__: namespace.update(__validators__) namespace.update(fields) if __config__: namespace["Config"] = inherit_config(__config__, BaseConfig) return type(model_name, (__base__,), namespace)
https://github.com/samuelcolvin/pydantic/issues/1366
Traceback (most recent call last): File "test_model_name.py", line 3, in <module> @dataclass File "pydantic/dataclasses.py", line 146, in pydantic.dataclasses.dataclass # File "pydantic/dataclasses.py", line 141, in pydantic.dataclasses.dataclass.wrap # | True | True | True | add | raise | Frozen, so hashable File "pydantic/dataclasses.py", line 106, in pydantic.dataclasses._process_class # +--- order= parameter File "pydantic/main.py", line 736, in pydantic.main.create_model TypeError: create_model() got multiple values for keyword argument 'model_name'
TypeError
def __get_validators__(cls) -> "CallableGenerator": yield cls.list_length_validator
def __get_validators__(cls) -> "CallableGenerator": yield list_validator yield cls.list_length_validator
https://github.com/samuelcolvin/pydantic/issues/1295
class Foo(BaseModel): ... bar: List = Field(None) ... baz: List = Field(None, max_items=10) ... Foo() Foo(bar=None, baz=None) Foo(bar=None) Foo(bar=None, baz=None) Foo(baz=None) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/spike/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pydantic/main.py", line 283, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for Foo baz value is not a valid list (type=type_error.list)
pydantic.error_wrappers.ValidationError
def list_length_validator( cls, v: "Optional[List[T]]", field: "ModelField" ) -> "Optional[List[T]]": if v is None and not field.required: return None v = list_validator(v) v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.ListMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.ListMaxLengthError(limit_value=cls.max_items) return v
def list_length_validator(cls, v: "List[T]") -> "List[T]": v_len = len(v) if cls.min_items is not None and v_len < cls.min_items: raise errors.ListMinLengthError(limit_value=cls.min_items) if cls.max_items is not None and v_len > cls.max_items: raise errors.ListMaxLengthError(limit_value=cls.max_items) return v
https://github.com/samuelcolvin/pydantic/issues/1295
class Foo(BaseModel): ... bar: List = Field(None) ... baz: List = Field(None, max_items=10) ... Foo() Foo(bar=None, baz=None) Foo(bar=None) Foo(bar=None, baz=None) Foo(baz=None) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/spike/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pydantic/main.py", line 283, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for Foo baz value is not a valid list (type=type_error.list)
pydantic.error_wrappers.ValidationError
def validate(cls: Type["Model"], value: Any) -> "Model": if isinstance(value, dict): return cls(**value) elif isinstance(value, cls): return value.copy() elif cls.__config__.orm_mode: return cls.from_orm(value) elif cls.__custom_root_type__: return cls.parse_obj(value) else: try: value_as_dict = dict(value) except (TypeError, ValueError) as e: raise DictError() from e return cls(**value_as_dict)
def validate(cls: Type["Model"], value: Any) -> "Model": if isinstance(value, dict): return cls(**value) elif isinstance(value, cls): return value.copy() elif cls.__config__.orm_mode: return cls.from_orm(value) else: try: value_as_dict = dict(value) except (TypeError, ValueError) as e: raise DictError() from e return cls(**value_as_dict)
https://github.com/samuelcolvin/pydantic/issues/1190
from pydantic import BaseModel class OperationData(BaseModel): id: str class Operation(BaseModel): __root__: Tuple[int, OperationData] data = [0, {'id': '1.11.0'}] # this one works as expected print(Operation.parse_obj(data)) # printed: __root__=(0, OperationData(id='1.11.0')) # However, this one doesn't print(parse_obj_as(Operation, data)) # Traceback (most recent call last): # File "/home/**removed**/protocol/base.py", line 238, in <module> # print(parse_obj_as(Operation, data)) # File "pydantic/tools.py", line 35, in pydantic.tools.parse_obj_as # File "pydantic/main.py", line 283, in pydantic.main.BaseModel.__init__ # pydantic.error_wrappers.ValidationError: 1 validation error for ParsingModel[Operation] #__root__ # value is not a valid dict (type=type_error.dict) # Which is not a big problem. The problem is that I have nested class class OperationsBatch(BaseModel): batch_desc: str operations: List[Operation] # and it produces same exception on print(OperationsBatch.parse_obj({'batch_desc': '123', 'operations': [data, data]})) # Traceback (most recent call last): # File "/home/**removed**/protocol/base.py", line 243, in <module> # OperationsBatch.parse_obj({'batch_desc': '123', 'operations': [data, data]}) # File "pydantic/main.py", line 402, in pydantic.main.BaseModel.parse_obj # File "pydantic/main.py", line 283, in pydantic.main.BaseModel.__init__ # pydantic.error_wrappers.ValidationError: 2 validation errors for OperationsBatch # operations -> 0 # value is not a valid dict (type=type_error.dict) # operations -> 1 # value is not a valid dict (type=type_error.dict)
pydantic.error_wrappers.ValidationError
def __init__(self, default: Any, **kwargs: Any) -> None: self.default = default self.alias = kwargs.pop("alias", None) self.alias_priority = kwargs.pop("alias_priority", 2 if self.alias else None) self.title = kwargs.pop("title", None) self.description = kwargs.pop("description", None) self.const = kwargs.pop("const", None) self.gt = kwargs.pop("gt", None) self.ge = kwargs.pop("ge", None) self.lt = kwargs.pop("lt", None) self.le = kwargs.pop("le", None) self.multiple_of = kwargs.pop("multiple_of", None) self.min_items = kwargs.pop("min_items", None) self.max_items = kwargs.pop("max_items", None) self.min_length = kwargs.pop("min_length", None) self.max_length = kwargs.pop("max_length", None) self.regex = kwargs.pop("regex", None) self.extra = kwargs
def __init__(self, default: Any, **kwargs: Any) -> None: self.default = default self.alias = kwargs.pop("alias", None) self.title = kwargs.pop("title", None) self.description = kwargs.pop("description", None) self.const = kwargs.pop("const", None) self.gt = kwargs.pop("gt", None) self.ge = kwargs.pop("ge", None) self.lt = kwargs.pop("lt", None) self.le = kwargs.pop("le", None) self.multiple_of = kwargs.pop("multiple_of", None) self.min_items = kwargs.pop("min_items", None) self.max_items = kwargs.pop("max_items", None) self.min_length = kwargs.pop("min_length", None) self.max_length = kwargs.pop("max_length", None) self.regex = kwargs.pop("regex", None) self.extra = kwargs
https://github.com/samuelcolvin/pydantic/issues/1177
MyModel() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<snip>/site-packages/pydantic/main.py", line 274, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for MyModel myATTRIBUTE field required (type=value_error.missing) MyOtherModel() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<snip>/site-packages/pydantic/main.py", line 274, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for MyOtherModel MY_ATTRIBUTE field required (type=value_error.missing)
pydantic.error_wrappers.ValidationError
def set_config(self, config: Type["BaseConfig"]) -> None: self.model_config = config info_from_config = config.get_field_info(self.name) config.prepare_field(self) new_alias = info_from_config.get("alias") new_alias_priority = info_from_config.get("alias_priority") or 0 if new_alias and new_alias_priority >= (self.field_info.alias_priority or 0): self.field_info.alias = new_alias self.field_info.alias_priority = new_alias_priority self.alias = new_alias
def set_config(self, config: Type["BaseConfig"]) -> None: self.model_config = config info_from_config = config.get_field_info(self.name) config.prepare_field(self) if info_from_config: self.field_info.alias = ( info_from_config.get("alias") or self.field_info.alias or self.name ) self.alias = cast(str, self.field_info.alias)
https://github.com/samuelcolvin/pydantic/issues/1177
MyModel() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<snip>/site-packages/pydantic/main.py", line 274, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for MyModel myATTRIBUTE field required (type=value_error.missing) MyOtherModel() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<snip>/site-packages/pydantic/main.py", line 274, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for MyOtherModel MY_ATTRIBUTE field required (type=value_error.missing)
pydantic.error_wrappers.ValidationError
def get_field_info(cls, name: str) -> Dict[str, Any]: fields_value = cls.fields.get(name) if isinstance(fields_value, str): field_info: Dict[str, Any] = {"alias": fields_value} elif isinstance(fields_value, dict): field_info = fields_value else: field_info = {} if "alias" in field_info: field_info.setdefault("alias_priority", 2) if field_info.get("alias_priority", 0) <= 1 and cls.alias_generator: alias = cls.alias_generator(name) if not isinstance(alias, str): raise TypeError( f"Config.alias_generator must return str, not {type(alias)}" ) field_info.update(alias=alias, alias_priority=1) return field_info
def get_field_info(cls, name: str) -> Dict[str, Any]: field_info = cls.fields.get(name) or {} if isinstance(field_info, str): field_info = {"alias": field_info} elif cls.alias_generator and "alias" not in field_info: alias = cls.alias_generator(name) if not isinstance(alias, str): raise TypeError( f"Config.alias_generator must return str, not {type(alias)}" ) field_info["alias"] = alias return field_info
https://github.com/samuelcolvin/pydantic/issues/1177
MyModel() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<snip>/site-packages/pydantic/main.py", line 274, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for MyModel myATTRIBUTE field required (type=value_error.missing) MyOtherModel() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<snip>/site-packages/pydantic/main.py", line 274, in __init__ raise validation_error pydantic.error_wrappers.ValidationError: 1 validation error for MyOtherModel MY_ATTRIBUTE field required (type=value_error.missing)
pydantic.error_wrappers.ValidationError
def validate_luhn_check_digit(cls, card_number: str) -> str: """ Based on: https://en.wikipedia.org/wiki/Luhn_algorithm """ sum_ = int(card_number[-1]) length = len(card_number) parity = length % 2 for i in range(length - 1): digit = int(card_number[i]) if i % 2 == parity: digit *= 2 if digit > 9: digit -= 9 sum_ += digit valid = sum_ % 10 == 0 if not valid: raise errors.LuhnValidationError return card_number
def validate_luhn_check_digit(cls, card_number: str) -> str: """ Based on: https://en.wikipedia.org/wiki/Luhn_algorithm """ sum_ = int(card_number[-1]) length = len(card_number) parity = length % 2 for i in range(length - 1): digit = int(card_number[i]) if i % 2 == parity: digit *= 2 sum_ += digit valid = sum_ % 10 == 0 if not valid: raise errors.LuhnValidationError return card_number
https://github.com/samuelcolvin/pydantic/issues/1166
Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.7/site-packages/pydantic/types.py", line 657, in validate_luhn_check_digit raise errors.LuhnValidationError pydantic.errors.LuhnValidationError: card number is not luhn valid
pydantic.errors.LuhnValidationError
def validate(cls, value: Union[str]) -> Union[str]: if cls.curtail_length and len(value) > cls.curtail_length: value = value[: cls.curtail_length] if cls.regex: if not cls.regex.match(value): raise errors.StrRegexError(pattern=cls.regex.pattern) return value
def validate(cls, value: str) -> str: if cls.curtail_length and len(value) > cls.curtail_length: value = value[: cls.curtail_length] if cls.regex: if not cls.regex.match(value): raise errors.StrRegexError(pattern=cls.regex.pattern) return value
https://github.com/samuelcolvin/pydantic/issues/1060
Traceback (most recent call last): File "/opt/pycharm-eap/plugins/python/helpers/pydev/pydevd.py", line 1415, in _exec pydev_imports.execfile(file, globals, locals) # execute the script File "/opt/pycharm-eap/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile exec(compile(contents+"\n", file, 'exec'), glob, loc) File "/home/spalmer1009/.PyCharm2019.3/config/scratches/scratch.py", line 44, in <module> connection_name=ConnectionName("test"), connection_endpoint=EndpointStr("default"), File "/home/spalmer1009/.PyCharm2019.3/config/scratches/scratch.py", line 30, in build_from connection_endpoint=connection_endpoint, File "pydantic/main.py", line 274, in pydantic.main.BaseModel.__init__ pydantic.error_wrappers.ValidationError: 2 validation errors for UpRequestEvent connection_name Expected unicode, got ConnectionName (type=type_error) connection_endpoint Expected unicode, got EndpointStr (type=type_error)
pydantic.error_wrappers.ValidationError
def strict_str_validator(v: Any) -> Union[str]: if isinstance(v, str): return v raise errors.StrError()
def strict_str_validator(v: Any) -> str: if isinstance(v, str): return v raise errors.StrError()
https://github.com/samuelcolvin/pydantic/issues/1060
Traceback (most recent call last): File "/opt/pycharm-eap/plugins/python/helpers/pydev/pydevd.py", line 1415, in _exec pydev_imports.execfile(file, globals, locals) # execute the script File "/opt/pycharm-eap/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile exec(compile(contents+"\n", file, 'exec'), glob, loc) File "/home/spalmer1009/.PyCharm2019.3/config/scratches/scratch.py", line 44, in <module> connection_name=ConnectionName("test"), connection_endpoint=EndpointStr("default"), File "/home/spalmer1009/.PyCharm2019.3/config/scratches/scratch.py", line 30, in build_from connection_endpoint=connection_endpoint, File "pydantic/main.py", line 274, in pydantic.main.BaseModel.__init__ pydantic.error_wrappers.ValidationError: 2 validation errors for UpRequestEvent connection_name Expected unicode, got ConnectionName (type=type_error) connection_endpoint Expected unicode, got EndpointStr (type=type_error)
pydantic.error_wrappers.ValidationError
def get_annotation_from_schema(annotation: Any, schema: Schema) -> Type[Any]: """ Get an annotation with validation implemented for numbers and strings based on the schema. :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr`` :param schema: an instance of Schema, possibly with declarations for validations and JSON Schema :return: the same ``annotation`` if unmodified or a new annotation with validation in place """ if isinstance(annotation, type): attrs: Optional[Tuple[str, ...]] = None constraint_func: Optional[Callable[..., type]] = None if issubclass(annotation, str) and not issubclass( annotation, (EmailStr, AnyUrl, ConstrainedStr) ): attrs = ("max_length", "min_length", "regex") constraint_func = constr elif lenient_issubclass(annotation, numeric_types) and not issubclass( annotation, ( ConstrainedInt, ConstrainedFloat, ConstrainedDecimal, ConstrainedList, bool, ), ): # Is numeric type attrs = ("gt", "lt", "ge", "le", "multiple_of") numeric_type = next( t for t in numeric_types if issubclass(annotation, t) ) # pragma: no branch constraint_func = _map_types_constraint[numeric_type] elif issubclass(annotation, ConstrainedList): attrs = ("min_items", "max_items") constraint_func = conlist if attrs: kwargs = { attr_name: attr for attr_name, attr in ( (attr_name, getattr(schema, attr_name)) for attr_name in attrs ) if attr is not None } if kwargs: constraint_func = cast(Callable[..., type], constraint_func) return constraint_func(**kwargs) return annotation
def get_annotation_from_schema(annotation: Any, schema: Schema) -> Type[Any]: """ Get an annotation with validation implemented for numbers and strings based on the schema. :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr`` :param schema: an instance of Schema, possibly with declarations for validations and JSON Schema :return: the same ``annotation`` if unmodified or a new annotation with validation in place """ if isinstance(annotation, type): attrs: Optional[Tuple[str, ...]] = None constraint_func: Optional[Callable[..., type]] = None if issubclass(annotation, str) and not issubclass( annotation, (EmailStr, DSN, UrlStr, ConstrainedStr) ): attrs = ("max_length", "min_length", "regex") constraint_func = constr elif lenient_issubclass(annotation, numeric_types) and not issubclass( annotation, ( ConstrainedInt, ConstrainedFloat, ConstrainedDecimal, ConstrainedList, bool, ), ): # Is numeric type attrs = ("gt", "lt", "ge", "le", "multiple_of") numeric_type = next( t for t in numeric_types if issubclass(annotation, t) ) # pragma: no branch constraint_func = _map_types_constraint[numeric_type] elif issubclass(annotation, ConstrainedList): attrs = ("min_items", "max_items") constraint_func = conlist if attrs: kwargs = { attr_name: attr for attr_name, attr in ( (attr_name, getattr(schema, attr_name)) for attr_name in attrs ) if attr is not None } if kwargs: constraint_func = cast(Callable[..., type], constraint_func) return constraint_func(**kwargs) return annotation
https://github.com/samuelcolvin/pydantic/issues/541
from pydantic import BaseModel, urlstr class DatabaseConfig(BaseModel): ... url: urlstr(schemes={'postgresql'}, require_tld=False) = ... ... DatabaseConfig(url='postgresql://diary_test@localhost/diary_test') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pydantic/main.py", line 235, in __init__ object.__setattr__(self, '__values__', self._process_values(data)) File "pydantic/main.py", line 438, in _process_values return validate_model(self, input_data) # type: ignore File "pydantic/main.py", line 635, in validate_model raise ValidationError(errors) pydantic.error_wrappers.ValidationError: 1 validation error url url string does not match regex (type=value_error.url.regex)
pydantic.error_wrappers.ValidationError
def display_as_type(v: AnyType) -> str: if not isinstance(v, typing_base) and not isinstance(v, type): v = type(v) if isinstance(v, type) and issubclass(v, Enum): if issubclass(v, int): return "int" elif issubclass(v, str): return "str" else: return "enum" try: return v.__name__ except AttributeError: # happens with unions return str(v)
def display_as_type(v: AnyType) -> str: if not isinstance(v, typing_base) and not isinstance(v, type): v = type(v) if lenient_issubclass(v, Enum): if issubclass(v, int): return "int" elif issubclass(v, str): return "str" else: return "enum" try: return v.__name__ except AttributeError: # happens with unions return str(v)
https://github.com/samuelcolvin/pydantic/issues/541
from pydantic import BaseModel, urlstr class DatabaseConfig(BaseModel): ... url: urlstr(schemes={'postgresql'}, require_tld=False) = ... ... DatabaseConfig(url='postgresql://diary_test@localhost/diary_test') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pydantic/main.py", line 235, in __init__ object.__setattr__(self, '__values__', self._process_values(data)) File "pydantic/main.py", line 438, in _process_values return validate_model(self, input_data) # type: ignore File "pydantic/main.py", line 635, in validate_model raise ValidationError(errors) pydantic.error_wrappers.ValidationError: 1 validation error url url string does not match regex (type=value_error.url.regex)
pydantic.error_wrappers.ValidationError
def setattr_validate_assignment(self: "DataclassType", name: str, value: Any) -> None: if self.__initialised__: d = dict(self.__dict__) d.pop(name, None) known_field = self.__pydantic_model__.__fields__.get(name, None) if known_field: value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__) if error_: raise ValidationError([error_], type(self)) object.__setattr__(self, name, value)
def setattr_validate_assignment(self: "DataclassType", name: str, value: Any) -> None: if self.__initialised__: d = dict(self.__dict__) d.pop(name) value, error_ = self.__pydantic_model__.__fields__[name].validate( value, d, loc=name, cls=self.__class__ ) if error_: raise ValidationError([error_], type(self)) object.__setattr__(self, name, value)
https://github.com/samuelcolvin/pydantic/issues/723
import contextlib import datetime import pydantic class ModelWithoutValidation(pydantic.BaseModel): name: str = None birthdate: datetime.date = None class Config: extra = pydantic.Extra.allow validate_assignment = False # <<< !!! obj = ModelWithoutValidation(name="Jim Smith") obj.name = "John Doe" # OK: Does not fail because assignment is not validated obj.birthdate = "1983-16-72" # OK: Does not fail because assignment is not validated obj.fake_stuff = "asdf" # OK: Can assign new field class ModelWithValidation(pydantic.BaseModel): name: str = None birthdate: datetime.date = None class Config: extra = pydantic.Extra.allow validate_assignment = True # <<< !!! obj = ModelWithValidation(name="Jim Smith") obj.name = "John Doe" # OK: Does not fail because the value is acceptable with contextlib.suppress(pydantic.ValidationError): obj.birthdate = "1983-16-72" # OK: Fails because the value is wrong obj.fake_stuff = "asdf" # FAIL: Cannot assign new field becaue there's no valiator # Traceback (most recent call last): # File "/home/yaraslau/Projects/Dialogue/tmp/test.py", line 34, in <module> # obj.fake_stuff = "asdf" # File "pydantic/main.py", line 294, in pydantic.main.BaseModel.__setattr__ # KeyError: 'fake_stuff'
pydantic.ValidationError
def __setattr__(self, name, value): if self.__config__.extra is not Extra.allow and name not in self.__fields__: raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"') elif not self.__config__.allow_mutation: raise TypeError( f'"{self.__class__.__name__}" is immutable and does not support item assignment' ) elif self.__config__.validate_assignment: known_field = self.fields.get(name, None) if known_field: value, error_ = known_field.validate( value, self.dict(exclude={name}), loc=name ) if error_: raise ValidationError([error_], type(self)) self.__dict__[name] = value self.__fields_set__.add(name)
def __setattr__(self, name, value): if self.__config__.extra is not Extra.allow and name not in self.__fields__: raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"') elif not self.__config__.allow_mutation: raise TypeError( f'"{self.__class__.__name__}" is immutable and does not support item assignment' ) elif self.__config__.validate_assignment: value_, error_ = self.fields[name].validate( value, self.dict(exclude={name}), loc=name ) if error_: raise ValidationError([error_], type(self)) else: self.__dict__[name] = value_ self.__fields_set__.add(name) else: self.__dict__[name] = value self.__fields_set__.add(name)
https://github.com/samuelcolvin/pydantic/issues/723
import contextlib import datetime import pydantic class ModelWithoutValidation(pydantic.BaseModel): name: str = None birthdate: datetime.date = None class Config: extra = pydantic.Extra.allow validate_assignment = False # <<< !!! obj = ModelWithoutValidation(name="Jim Smith") obj.name = "John Doe" # OK: Does not fail because assignment is not validated obj.birthdate = "1983-16-72" # OK: Does not fail because assignment is not validated obj.fake_stuff = "asdf" # OK: Can assign new field class ModelWithValidation(pydantic.BaseModel): name: str = None birthdate: datetime.date = None class Config: extra = pydantic.Extra.allow validate_assignment = True # <<< !!! obj = ModelWithValidation(name="Jim Smith") obj.name = "John Doe" # OK: Does not fail because the value is acceptable with contextlib.suppress(pydantic.ValidationError): obj.birthdate = "1983-16-72" # OK: Fails because the value is wrong obj.fake_stuff = "asdf" # FAIL: Cannot assign new field becaue there's no valiator # Traceback (most recent call last): # File "/home/yaraslau/Projects/Dialogue/tmp/test.py", line 34, in <module> # obj.fake_stuff = "asdf" # File "pydantic/main.py", line 294, in pydantic.main.BaseModel.__setattr__ # KeyError: 'fake_stuff'
pydantic.ValidationError
def __str__(self) -> str: permitted = ", ".join(repr(v.value) for v in self.ctx["enum_values"]) # type: ignore return f"value is not a valid enumeration member; permitted: {permitted}"
def __str__(self) -> str: permitted = ", ".join(repr(v.value) for v in self.ctx["enum_type"]) # type: ignore return f"value is not a valid enumeration member; permitted: {permitted}"
https://github.com/samuelcolvin/pydantic/issues/696
Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/Users/josepcugat/.pyenv/versions/current/lib/python3.6/site-packages/pydantic/main.py", line 276, in __init__ values, fields_set, _ = validate_model(__pydantic_self__, data) File "/Users/josepcugat/.pyenv/versions/current/lib/python3.6/site-packages/pydantic/main.py", line 790, in validate_model raise ValidationError(errors) pydantic.error_wrappers.ValidationError: 1 validation error bar value is not a valid enumeration member; permitted: 'VALID' (type=type_error.enum; enum_type=<enum 'MyEnum'>) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 4, in <module> File "/Users/josepcugat/.pyenv/versions/current/lib/python3.6/site-packages/pydantic/error_wrappers.py", line 62, in json return json.dumps(self.errors(), indent=indent) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/__init__.py", line 238, in dumps **kw).encode(obj) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 201, in encode chunks = list(chunks) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 428, in _iterencode yield from _iterencode_list(o, _current_indent_level) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 325, in _iterencode_list yield from chunks File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict yield from chunks File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict yield from chunks File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 437, in _iterencode o = _default(o) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'EnumMeta' is not JSON serializable
pydantic.error_wrappers.ValidationError
def enum_validator(v: Any, field: "Field", config: "BaseConfig") -> Enum: try: enum_v = field.type_(v) except ValueError: # field.type_ should be an enum, so will be iterable raise errors.EnumError(enum_values=list(field.type_)) # type: ignore return enum_v.value if config.use_enum_values else enum_v
def enum_validator(v: Any, field: "Field", config: "BaseConfig") -> Enum: try: enum_v = field.type_(v) except ValueError: raise errors.EnumError(enum_type=field.type_) return enum_v.value if config.use_enum_values else enum_v
https://github.com/samuelcolvin/pydantic/issues/696
Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/Users/josepcugat/.pyenv/versions/current/lib/python3.6/site-packages/pydantic/main.py", line 276, in __init__ values, fields_set, _ = validate_model(__pydantic_self__, data) File "/Users/josepcugat/.pyenv/versions/current/lib/python3.6/site-packages/pydantic/main.py", line 790, in validate_model raise ValidationError(errors) pydantic.error_wrappers.ValidationError: 1 validation error bar value is not a valid enumeration member; permitted: 'VALID' (type=type_error.enum; enum_type=<enum 'MyEnum'>) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 4, in <module> File "/Users/josepcugat/.pyenv/versions/current/lib/python3.6/site-packages/pydantic/error_wrappers.py", line 62, in json return json.dumps(self.errors(), indent=indent) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/__init__.py", line 238, in dumps **kw).encode(obj) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 201, in encode chunks = list(chunks) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 428, in _iterencode yield from _iterencode_list(o, _current_indent_level) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 325, in _iterencode_list yield from chunks File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict yield from chunks File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 404, in _iterencode_dict yield from chunks File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 437, in _iterencode o = _default(o) File "/Users/josepcugat/.pyenv/versions/3.6.8/lib/python3.6/json/encoder.py", line 180, in default o.__class__.__name__) TypeError: Object of type 'EnumMeta' is not JSON serializable
pydantic.error_wrappers.ValidationError
def truncate(v: Union[str], *, max_len: int = 80) -> str: """ Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long """ if isinstance(v, str) and len(v) > (max_len - 2): # -3 so quote + string + … + quote has correct length return (v[: (max_len - 3)] + "…").__repr__() try: v = v.__repr__() except TypeError: v = type(v).__repr__(v) # in case v is a type if len(v) > max_len: v = v[: max_len - 1] + "…" return v
def truncate(v: Union[str], *, max_len: int = 80) -> str: """ Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long """ if isinstance(v, str) and len(v) > (max_len - 2): # -3 so quote + string + … + quote has correct length return (v[: (max_len - 3)] + "…").__repr__() v = v.__repr__() if len(v) > max_len: v = v[: max_len - 1] + "…" return v
https://github.com/samuelcolvin/pydantic/issues/608
Traceback (most recent call last): File "test.py", line 23, in <module> print(TestSettings(cls=TestClass)) File "pydantic/main.py", line 534, in pydantic.main.BaseModel.__str__ File "pydantic/main.py", line 530, in genexpr File "pydantic/main.py", line 530, in genexpr File "pydantic/utils.py", line 158, in pydantic.utils.truncate TypeError: descriptor '__repr__' of 'object' object needs an argument
TypeError
def _process_class( _cls: AnyType, init: bool, repr: bool, eq: bool, order: bool, unsafe_hash: bool, frozen: bool, config: Type["BaseConfig"], ) -> "DataclassType": post_init_original = getattr(_cls, "__post_init__", None) post_init_post_parse = getattr(_cls, "__post_init_post_parse__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None def _pydantic_post_init(self: "DataclassType", *initvars: Any) -> None: if post_init_original is not None: post_init_original(self, *initvars) d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[ 0 ] object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if post_init_post_parse is not None: post_init_post_parse(self) _cls.__post_init__ = _pydantic_post_init cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore fields: Dict[str, Any] = { field.name: ( field.type, field.default if field.default != dataclasses.MISSING else Required, ) for field in dataclasses.fields(cls) } validators = gather_validators(cls) cls.__pydantic_model__ = create_model( cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields, ) cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) cls.__get_validators__ = classmethod(_get_validators) if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment return cls
def _process_class( _cls: AnyType, init: bool, repr: bool, eq: bool, order: bool, unsafe_hash: bool, frozen: bool, config: Type["BaseConfig"], ) -> "DataclassType": post_init_original = getattr(_cls, "__post_init__", None) post_init_post_parse = getattr(_cls, "__post_init_post_parse__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None _cls.__post_init__ = _pydantic_post_init cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) # type: ignore fields: Dict[str, Any] = { field.name: ( field.type, field.default if field.default != dataclasses.MISSING else Required, ) for field in dataclasses.fields(cls) } cls.__post_init_original__ = post_init_original cls.__post_init_post_parse__ = post_init_post_parse validators = gather_validators(cls) cls.__pydantic_model__ = create_model( cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **fields, ) cls.__initialised__ = False cls.__validate__ = classmethod(_validate_dataclass) cls.__get_validators__ = classmethod(_get_validators) if cls.__pydantic_model__.__config__.validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment return cls
https://github.com/samuelcolvin/pydantic/issues/536
--------------------------------------------------------------------------- RecursionError Traceback (most recent call last) <ipython-input-28-55581d931e2a> in <module> ----> 1 B(b=1, a=2) <string> in __init__(self, a, b) [path_removed]/.venv/lib/python3.7/site-packages/pydantic/dataclasses.py in _pydantic_post_init(self) 30 object.__setattr__(self, '__initialised__', True) 31 if self.__post_init_original__: ---> 32 self.__post_init_original__() 33 34 <ipython-input-27-252024563a0b> in __post_init__(self) 11 12 def __post_init__(self): ---> 13 super().__post_init__() 14 print("B") ... last 2 frames repeated, from the frame below ... [path_removed]/.venv/lib/python3.7/site-packages/pydantic/dataclasses.py in _pydantic_post_init(self) 30 object.__setattr__(self, '__initialised__', True) 31 if self.__post_init_original__: ---> 32 self.__post_init_original__() 33 34 RecursionError: maximum recursion depth exceeded while calling a Python object
RecursionError
def _pydantic_post_init(self: "DataclassType", *initvars: Any) -> None: if post_init_original is not None: post_init_original(self, *initvars) d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0] object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if post_init_post_parse is not None: post_init_post_parse(self)
def _pydantic_post_init(self: "DataclassType", *initvars: Any) -> None: if self.__post_init_original__: self.__post_init_original__(*initvars) d = validate_model(self.__pydantic_model__, self.__dict__, cls=self.__class__)[0] object.__setattr__(self, "__dict__", d) object.__setattr__(self, "__initialised__", True) if self.__post_init_post_parse__: self.__post_init_post_parse__()
https://github.com/samuelcolvin/pydantic/issues/536
--------------------------------------------------------------------------- RecursionError Traceback (most recent call last) <ipython-input-28-55581d931e2a> in <module> ----> 1 B(b=1, a=2) <string> in __init__(self, a, b) [path_removed]/.venv/lib/python3.7/site-packages/pydantic/dataclasses.py in _pydantic_post_init(self) 30 object.__setattr__(self, '__initialised__', True) 31 if self.__post_init_original__: ---> 32 self.__post_init_original__() 33 34 <ipython-input-27-252024563a0b> in __post_init__(self) 11 12 def __post_init__(self): ---> 13 super().__post_init__() 14 print("B") ... last 2 frames repeated, from the frame below ... [path_removed]/.venv/lib/python3.7/site-packages/pydantic/dataclasses.py in _pydantic_post_init(self) 30 object.__setattr__(self, '__initialised__', True) 31 if self.__post_init_original__: ---> 32 self.__post_init_original__() 33 34 RecursionError: maximum recursion depth exceeded while calling a Python object
RecursionError
def find_validators( type_: AnyType, arbitrary_types_allowed: bool = False ) -> List[AnyCallable]: if type_ is Any or type(type_) in (ForwardRef, TypeVar): return [] if type_ is Pattern: return pattern_validators if is_callable_type(type_): return [callable_validator] supertype = _find_supertype(type_) if supertype is not None: type_ = supertype for val_type, validators in _VALIDATORS: try: if issubclass(type_, val_type): return validators except TypeError as e: raise RuntimeError( f"error checking inheritance of {type_!r} (type: {display_as_type(type_)})" ) from e if arbitrary_types_allowed: return [make_arbitrary_type_validator(type_)] else: raise RuntimeError(f"no validator found for {type_}")
def find_validators( type_: AnyType, arbitrary_types_allowed: bool = False ) -> List[AnyCallable]: if type_ is Any or type(type_) == ForwardRef: return [] if type_ is Pattern: return pattern_validators if is_callable_type(type_): return [callable_validator] supertype = _find_supertype(type_) if supertype is not None: type_ = supertype for val_type, validators in _VALIDATORS: try: if issubclass(type_, val_type): return validators except TypeError as e: raise RuntimeError( f"error checking inheritance of {type_!r} (type: {display_as_type(type_)})" ) from e if arbitrary_types_allowed: return [make_arbitrary_type_validator(type_)] else: raise RuntimeError(f"no validator found for {type_}")
https://github.com/samuelcolvin/pydantic/issues/550
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed) 385 try: --> 386 if issubclass(type_, val_type): 387 return validators TypeError: issubclass() arg 1 must be a class The above exception was the direct cause of the following exception: RuntimeError Traceback (most recent call last) <ipython-input-5-c7b931439a60> in <module> ----> 1 class Subclass(BaseModel): 2 asdf: int 3 fdsa: Dict /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/main.py in __new__(mcs, name, bases, namespace) 180 annotation=ann_type, 181 class_validators=vg.get_validators(ann_name), --> 182 config=config, 183 ) 184 /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in infer(cls, name, value, annotation, class_validators, config) 136 required=required, 137 model_config=config, --> 138 schema=schema, 139 ) 140 /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in __init__(self, name, type_, class_validators, model_config, default, required, alias, schema) 105 self.parse_json: bool = False 106 self.shape: Shape = Shape.SINGLETON --> 107 self.prepare() 108 109 @classmethod /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in prepare(self) 170 self.allow_none = True 171 --> 172 self._populate_sub_fields() 173 self._populate_validators() 174 /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in _populate_sub_fields(self) 221 assert issubclass(origin, Mapping) 222 self.key_field = self._create_sub_type( --> 223 self.type_.__args__[0], 'key_' + self.name, for_keys=True # type: ignore 224 ) 225 self.type_ = self.type_.__args__[1] # type: ignore /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in _create_sub_type(self, type_, name, 
for_keys) 235 name=name, 236 class_validators=None if for_keys else {k: v for k, v in self.class_validators.items() if not v.whole}, --> 237 model_config=self.model_config, 238 ) 239 /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in __init__(self, name, type_, class_validators, model_config, default, required, alias, schema) 105 self.parse_json: bool = False 106 self.shape: Shape = Shape.SINGLETON --> 107 self.prepare() 108 109 @classmethod /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in prepare(self) 171 172 self._populate_sub_fields() --> 173 self._populate_validators() 174 175 def _populate_sub_fields(self) -> None: # noqa: C901 (ignore complexity) /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/fields.py in _populate_validators(self) 253 get_validators() 254 if get_validators --> 255 else find_validators(self.type_, self.model_config.arbitrary_types_allowed) 256 ), 257 self.schema is not None and self.schema.const and constant_validator, /scratch/miniconda3/envs/jupyter/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed) 387 return validators 388 except TypeError as e: --> 389 raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e 390 391 if arbitrary_types_allowed: RuntimeError: error checking inheritance of ~KT (type: KT)
TypeError
def _get_key_factory(self, by_alias: bool) -> Callable[..., str]: if by_alias: return lambda fields, key: fields[key].alias if key in fields else key return lambda _, key: key
def _get_key_factory(self, by_alias: bool) -> Callable[..., str]: if by_alias: return lambda fields, key: fields[key].alias return lambda _, key: key
https://github.com/samuelcolvin/pydantic/issues/488
Traceback (most recent call last): File "...", line 41, in ... print(m.dict(by_alias=True)) # error File ".../python3.7/site-packages/pydantic/main.py", line 282, in dict get_key(k): v for k, v in self._iter(by_alias=by_alias, skip_defaults=skip_defaults) File ".../python3.7/site-packages/pydantic/main.py", line 282, in <dictcomp> get_key(k): v for k, v in self._iter(by_alias=by_alias, skip_defaults=skip_defaults) File ".../python3.7/site-packages/pydantic/main.py", line 293, in <lambda> return lambda fields, key: fields[key].alias KeyError: 'extra_column'
KeyError
def include_in_schema(self) -> bool: """ False if this is a simple field just allowing None as used in Unions/Optional. """ return self.type_ != NoneType
def include_in_schema(self) -> bool: """ False if this is a simple field just allowing None as used in Unions/Optional. """ return len(self.validators) != 1 or self.validators[0][1] != is_none_validator
https://github.com/samuelcolvin/pydantic/issues/363
Traceback (most recent call last): File "<input>", line 1, in <module> File "/Users/.../lib/python3.6/site-packages/pydantic/main.py", line 292, in schema s = model_schema(cls, by_alias=by_alias) File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 193, in model_schema model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 463, in model_process_schema model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 484, in model_type_schema f, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 240, in field_schema ref_prefix=ref_prefix, File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 442, in field_type_schema ref_prefix=ref_prefix, File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 610, in field_singleton_schema ref_prefix=ref_prefix, File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 534, in field_singleton_sub_fields_schema ref_prefix=ref_prefix, File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 442, in field_type_schema ref_prefix=ref_prefix, File "/Users/.../lib/python3.6/site-packages/pydantic/schema.py", line 645, in field_singleton_schema raise ValueError(f'Value not declarable with JSON Schema, field: {field}') ValueError: Value not declarable with JSON Schema, field: field_NoneType type=NoneType required
ValueError
def validator( *fields, pre: bool = False, whole: bool = False, always: bool = False, check_fields: bool = True, ): """ Decorate methods on the class indicating that they should be used to validate fields :param fields: which field(s) the method should be called on :param pre: whether or not this validator should be called before the standard validators (else after) :param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object :param always: whether this method and other validators should be called even if the value is missing :param check_fields: whether to check that the fields actually exist on the model """ if not fields: raise ConfigError("validator with no fields specified") elif isinstance(fields[0], FunctionType): raise ConfigError( "validators should be used with fields and keyword arguments, not bare. " "E.g. usage should be `@validator('<field_name>', ...)`" ) def dec(f): # avoid validators with duplicated names since without this validators can be overwritten silently # which generally isn't the intended behaviour, don't run in ipython - see #312 if not in_ipython(): # pragma: no branch ref = f.__module__ + "." + f.__qualname__ if ref in _FUNCS: raise ConfigError(f'duplicate validator function "{ref}"') _FUNCS.add(ref) f_cls = classmethod(f) f_cls.__validator_config = ( fields, Validator(f, pre, whole, always, check_fields), ) return f_cls return dec
def validator( *fields, pre: bool = False, whole: bool = False, always: bool = False, check_fields: bool = True, ): """ Decorate methods on the class indicating that they should be used to validate fields :param fields: which field(s) the method should be called on :param pre: whether or not this validator should be called before the standard validators (else after) :param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object :param always: whether this method and other validators should be called even if the value is missing :param check_fields: whether to check that the fields actually exist on the model """ if not fields: raise ConfigError("validator with no fields specified") elif isinstance(fields[0], FunctionType): raise ConfigError( "validators should be used with fields and keyword arguments, not bare. " "E.g. usage should be `@validator('<field_name>', ...)`" ) def dec(f): ref = f.__module__ + "." + f.__qualname__ if ref in _FUNCS: raise ConfigError(f'duplicate validator function "{ref}"') _FUNCS.add(ref) f_cls = classmethod(f) f_cls.__validator_config = ( fields, Validator(f, pre, whole, always, check_fields), ) return f_cls return dec
https://github.com/samuelcolvin/pydantic/issues/312
Config ip=IPv4Address('123.0.0.200') name='asdf' port=2392 when the cell is called first and raises ConfigError Traceback (most recent call last) in () 3 from ipaddress import ip_address 4 ----> 5 class Config(BaseModel): 6 ip: str 7 name: str in Config() 8 port: int 9 ---> 10 @validator("ip") 11 def check_ip(cls, value): 12 return ip_address(value) ~/anaconda3/lib/python3.7/site-packages/pydantic/main.py in dec(f) 449 ref = f.module + '.' + f.qualname 450 if ref in _FUNCS: --> 451 raise ConfigError(f'duplicate validator function "{ref}"') 452 _FUNCS.add(ref) 453 f_cls = classmethod(f) ConfigError: duplicate validator function "main.Config.check_ip"
ConfigError
def dec(f): # avoid validators with duplicated names since without this validators can be overwritten silently # which generally isn't the intended behaviour, don't run in ipython - see #312 if not in_ipython(): # pragma: no branch ref = f.__module__ + "." + f.__qualname__ if ref in _FUNCS: raise ConfigError(f'duplicate validator function "{ref}"') _FUNCS.add(ref) f_cls = classmethod(f) f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields) return f_cls
def dec(f): ref = f.__module__ + "." + f.__qualname__ if ref in _FUNCS: raise ConfigError(f'duplicate validator function "{ref}"') _FUNCS.add(ref) f_cls = classmethod(f) f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields) return f_cls
https://github.com/samuelcolvin/pydantic/issues/312
Config ip=IPv4Address('123.0.0.200') name='asdf' port=2392 when the cell is called first and raises ConfigError Traceback (most recent call last) in () 3 from ipaddress import ip_address 4 ----> 5 class Config(BaseModel): 6 ip: str 7 name: str in Config() 8 port: int 9 ---> 10 @validator("ip") 11 def check_ip(cls, value): 12 return ip_address(value) ~/anaconda3/lib/python3.7/site-packages/pydantic/main.py in dec(f) 449 ref = f.module + '.' + f.qualname 450 if ref in _FUNCS: --> 451 raise ConfigError(f'duplicate validator function "{ref}"') 452 _FUNCS.add(ref) 453 f_cls = classmethod(f) ConfigError: duplicate validator function "main.Config.check_ip"
ConfigError
def _process_class( _cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment ): post_init_original = getattr(_cls, "__post_init__", None) if post_init_original and post_init_original.__name__ == "_pydantic_post_init": post_init_original = None _cls.__post_init__ = _pydantic_post_init cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) fields = { name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items() } cls.__post_init_original__ = post_init_original cls.__pydantic_model__ = create_model(cls.__name__, **fields) cls.__initialised__ = False if validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment return cls
def _process_class( _cls, init, repr, eq, order, unsafe_hash, frozen, validate_assignment ): post_init_original = getattr(_cls, "__post_init__", None) _cls.__post_init__ = post_init cls = dataclasses._process_class(_cls, init, repr, eq, order, unsafe_hash, frozen) fields = { name: (field.type, field.default) for name, field in cls.__dataclass_fields__.items() } cls.__post_init_original__ = post_init_original cls.__pydantic_model__ = create_model(cls.__name__, **fields) cls.__initialised__ = False if validate_assignment and not frozen: cls.__setattr__ = setattr_validate_assignment return cls
https://github.com/samuelcolvin/pydantic/issues/293
Traceback (most recent call last): File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-9-10a7116ca691>", line 12, in <module> B(a='a', b='b') File "<string>", line 4, in __init__ File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init self.__post_init_original__() File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init self.__post_init_original__() File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 13, in post_init self.__post_init_original__() [Previous line repeated 952 more times] File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/dataclasses.py", line 9, in post_init d = validate_model(self.__pydantic_model__, self.__dict__) File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/main.py", line 484, in validate_model v_, errors_ = field.validate(value, values, loc=field.alias, cls=model.__class__) File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 303, in validate v, errors = self._validate_singleton(v, values, loc, cls) File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 406, in _validate_singleton return self._apply_validators(v, values, loc, cls, self.validators) File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/fields.py", line 412, in _apply_validators v = validator(v) File "/usr/local/pyenv/versions/3.6.7/lib/python3.6/site-packages/pydantic/validators.py", line 23, in str_validator if isinstance(v, (str, NoneType)): RecursionError: maximum recursion depth exceeded in __instancecheck__
RecursionError
def _substitute_environ(self): """ Substitute environment variables into values. """ d = {} for field in self.__fields__.values(): if field.has_alias: env_name = field.alias else: env_name = self.__config__.env_prefix + field.name.upper() env_var = os.getenv(env_name, None) if env_var: if _complex_field(field): try: env_var = json.loads(env_var) except ValueError as e: raise SettingsError(f'error parsing JSON for "{env_name}"') from e d[field.alias] = env_var return d
def _substitute_environ(self): """ Substitute environment variables into values. """ d = {} for field in self.__fields__.values(): if field.alt_alias: env_name = field.alias else: env_name = self.__config__.env_prefix + field.name.upper() env_var = os.getenv(env_name, None) if env_var: if _complex_field(field): try: env_var = json.loads(env_var) except ValueError as e: raise SettingsError(f'error parsing JSON for "{env_name}"') from e d[field.alias] = env_var return d
https://github.com/samuelcolvin/pydantic/issues/275
❯ export test=foobar; pipenv run python Python 3.6.1 (default, Aug 1 2018, 10:30:03) [GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.2)] on darwin Type "help", "copyright", "credits" or "license" for more information. import os import pydantic class S(pydantic.BaseSettings): ... test:str ... class Config: ... fields = dict(test=dict(alias="test")) ... S() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/env_settings.py", line 35, in __init__ super().__init__(**values) File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/main.py", line 166, in __init__ self.__setstate__(self._process_values(data)) File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/main.py", line 331, in _process_values return validate_model(self, input_data) File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/main.py", line 507, in validate_model raise ValidationError(errors) pydantic.error_wrappers.ValidationError: 1 validation error test none is not an allow value (type=type_error.none.not_allowed) os.getenv('test') 'foobar'
pydantic.error_wrappers.ValidationError
def __init__( self, *, name: str, type_: Type, class_validators: List[Validator], default: Any, required: bool, model_config: Any, alias: str = None, allow_none: bool = False, schema: Schema = None, ): self.name: str = name self.has_alias: bool = bool(alias) self.alias: str = alias or name self.type_: type = type_ self.class_validators = class_validators or [] self.validate_always: bool = False self.sub_fields: List[Field] = None self.key_field: Field = None self.validators = [] self.whole_pre_validators = None self.whole_post_validators = None self.default: Any = default self.required: bool = required self.model_config = model_config self.allow_none: bool = allow_none self.parse_json: bool = False self.shape: Shape = Shape.SINGLETON self._schema: Schema = schema self.prepare()
def __init__( self, *, name: str, type_: Type, class_validators: List[Validator], default: Any, required: bool, model_config: Any, alias: str = None, allow_none: bool = False, schema: Schema = None, ): self.name: str = name self.alias: str = alias or name self.type_: type = type_ self.class_validators = class_validators or [] self.validate_always: bool = False self.sub_fields: List[Field] = None self.key_field: Field = None self.validators = [] self.whole_pre_validators = None self.whole_post_validators = None self.default: Any = default self.required: bool = required self.model_config = model_config self.allow_none: bool = allow_none self.parse_json: bool = False self.shape: Shape = Shape.SINGLETON self._schema: Schema = schema self.prepare()
https://github.com/samuelcolvin/pydantic/issues/275
❯ export test=foobar; pipenv run python Python 3.6.1 (default, Aug 1 2018, 10:30:03) [GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.2)] on darwin Type "help", "copyright", "credits" or "license" for more information. import os import pydantic class S(pydantic.BaseSettings): ... test:str ... class Config: ... fields = dict(test=dict(alias="test")) ... S() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/env_settings.py", line 35, in __init__ super().__init__(**values) File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/main.py", line 166, in __init__ self.__setstate__(self._process_values(data)) File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/main.py", line 331, in _process_values return validate_model(self, input_data) File "/Users/jasonkuhrt/.local/share/virtualenvs/waldo-QOik3jd4/lib/python3.6/site-packages/pydantic/main.py", line 507, in validate_model raise ValidationError(errors) pydantic.error_wrappers.ValidationError: 1 validation error test none is not an allow value (type=type_error.none.not_allowed) os.getenv('test') 'foobar'
pydantic.error_wrappers.ValidationError
def schema(self, by_alias=True): s = dict( title=self._schema.title or self.alias.title(), required=self.required, ) if not self.required and self.default is not None: s["default"] = self.default s.update(self._schema.extra) ts = self.type_schema(by_alias) s.update(ts if isinstance(ts, dict) else {"type": ts}) return s
def schema(self, by_alias=True): s = self.type_.schema(by_alias) if hasattr(self.type_, "schema") else {} s.update( type=s.get("type") or display_as_type(self.type_), title=self._schema.title or s.get("title") or self.alias.title(), required=self.required, ) if not self.required and self.default is not None: s["default"] = self.default if issubclass(self.type_, Enum): choice_names = self._schema.choice_names or {} s["choices"] = [ (v.value, choice_names.get(v.value) or k.title()) for k, v in self.type_.__members__.items() ] s.update(self._schema.extra) return s
https://github.com/samuelcolvin/pydantic/issues/213
Traceback (most recent call last): File "test_pydantic_schema.py", line 9, in <module> print(TestModel.schema_json(indent=2)) File "/pydantic/pydantic/main.py", line 288, in schema_json return json.dumps(cls.schema(by_alias=by_alias), default=pydantic_encoder, **dumps_kwargs) File "/pydantic/pydantic/main.py", line 278, in schema s['properties'] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} File "/pydantic/pydantic/main.py", line 278, in <dictcomp> s['properties'] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} File "/pydantic/pydantic/fields.py", line 146, in schema if issubclass(self.type_, Enum): TypeError: issubclass() arg 1 must be a class
TypeError
def _populate_sub_fields(self): # typing interface is horrible, we have to do some ugly checks if isinstance(self.type_, type) and issubclass(self.type_, JsonWrapper): self.type_ = self.type_.inner_type self.parse_json = True origin = getattr(self.type_, "__origin__", None) if origin is None: # field is not "typing" object eg. Union, Dict, List etc. return if origin is Union: types_ = [] for type_ in self.type_.__args__: if type_ is NoneType: self.allow_none = True self.required = False else: types_.append(type_) self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{display_as_type(t)}") for t in types_ ] return if issubclass(origin, Tuple): self.shape = Shape.TUPLE self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(self.type_.__args__) ] return if issubclass(origin, List): self.type_ = self.type_.__args__[0] self.shape = Shape.LIST elif issubclass(origin, Set): self.type_ = self.type_.__args__[0] self.shape = Shape.SET else: assert issubclass(origin, Mapping) self.key_field = self._create_sub_type( self.type_.__args__[0], "key_" + self.name ) self.type_ = self.type_.__args__[1] self.shape = Shape.MAPPING if getattr(self.type_, "__origin__", None): # type_ has been refined eg. as the type of a List and sub_fields needs to be populated self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
def _populate_sub_fields(self): # typing interface is horrible, we have to do some ugly checks if isinstance(self.type_, type) and issubclass(self.type_, JsonWrapper): self.type_ = self.type_.inner_type self.parse_json = True origin = _get_type_origin(self.type_) if origin is None: # field is not "typing" object eg. Union, Dict, List etc. return if origin is Union: types_ = [] for type_ in self.type_.__args__: if type_ is NoneType: self.allow_none = True else: types_.append(type_) self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{display_as_type(t)}") for t in types_ ] return if issubclass(origin, Tuple): self.shape = Shape.TUPLE self.sub_fields = [ self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(self.type_.__args__) ] return if issubclass(origin, List): self.type_ = self.type_.__args__[0] self.shape = Shape.LIST elif issubclass(origin, Set): self.type_ = self.type_.__args__[0] self.shape = Shape.SET else: assert issubclass(origin, Mapping) self.key_field = self._create_sub_type( self.type_.__args__[0], "key_" + self.name ) self.type_ = self.type_.__args__[1] self.shape = Shape.MAPPING if _get_type_origin(self.type_): # type_ has been refined eg. as the type of a List and sub_fields needs to be populated self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
https://github.com/samuelcolvin/pydantic/issues/213
Traceback (most recent call last): File "test_pydantic_schema.py", line 9, in <module> print(TestModel.schema_json(indent=2)) File "/pydantic/pydantic/main.py", line 288, in schema_json return json.dumps(cls.schema(by_alias=by_alias), default=pydantic_encoder, **dumps_kwargs) File "/pydantic/pydantic/main.py", line 278, in schema s['properties'] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} File "/pydantic/pydantic/main.py", line 278, in <dictcomp> s['properties'] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} File "/pydantic/pydantic/fields.py", line 146, in schema if issubclass(self.type_, Enum): TypeError: issubclass() arg 1 must be a class
TypeError
def schema(cls, by_alias=True) -> Dict[str, Any]: cached = cls._schema_cache.get(by_alias) if cached is not None: return cached s = {"title": cls.__config__.title or cls.__name__} if cls.__doc__: s["description"] = clean_docstring(cls.__doc__) s.update(cls.type_schema(by_alias)) cls._schema_cache[by_alias] = s return s
def schema(cls, by_alias=True) -> Dict[str, Any]: cached = cls._schema_cache.get(by_alias) if cached is not None: return cached s = { "type": "object", "title": cls.__config__.title or cls.__name__, } if cls.__doc__: s["description"] = clean_docstring(cls.__doc__) if by_alias: s["properties"] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} else: s["properties"] = {k: f.schema(by_alias) for k, f in cls.__fields__.items()} cls._schema_cache[by_alias] = s return s
https://github.com/samuelcolvin/pydantic/issues/213
Traceback (most recent call last): File "test_pydantic_schema.py", line 9, in <module> print(TestModel.schema_json(indent=2)) File "/pydantic/pydantic/main.py", line 288, in schema_json return json.dumps(cls.schema(by_alias=by_alias), default=pydantic_encoder, **dumps_kwargs) File "/pydantic/pydantic/main.py", line 278, in schema s['properties'] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} File "/pydantic/pydantic/main.py", line 278, in <dictcomp> s['properties'] = {f.alias: f.schema(by_alias) for f in cls.__fields__.values()} File "/pydantic/pydantic/fields.py", line 146, in schema if issubclass(self.type_, Enum): TypeError: issubclass() arg 1 must be a class
TypeError
def __init__( self, language: str, treebank: Optional[str] = None, stanza_debug_level="ERROR" ) -> None: """Constructor for ``get_stanza_models`` wrapper class. TODO: Do tests for all langs and available models for each >>> stanza_wrapper = StanzaWrapper(language='grc', stanza_debug_level="INFO") >>> isinstance(stanza_wrapper, StanzaWrapper) True >>> stanza_wrapper.language 'grc' >>> stanza_wrapper.treebank 'proiel' >>> stanza_wrapper = StanzaWrapper(language="grc", treebank="perseus", stanza_debug_level="INFO") >>> isinstance(stanza_wrapper, StanzaWrapper) True >>> stanza_wrapper.language 'grc' >>> stanza_wrapper.treebank 'perseus' >>> from cltkv1.languages.example_texts import get_example_text >>> stanza_doc = stanza_wrapper.parse(get_example_text("grc")) >>> StanzaWrapper(language="xxx", stanza_debug_level="INFO") Traceback (most recent call last): ... cltkv1.core.exceptions.UnknownLanguageError: Language 'xxx' either not in scope for CLTK or not supported by Stanza. >>> stanza_wrapper = StanzaWrapper(language="grc", treebank="proiel", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("grc")) >>> stanza_wrapper = StanzaWrapper(language="lat", treebank="perseus", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("lat")) >>> stanza_wrapper = StanzaWrapper(language="lat", treebank="proiel", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("lat")) >>> stanza_wrapper = StanzaWrapper(language="chu", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("chu")) # >>> stanza_wrapper = StanzaWrapper(language="cop", stanza_debug_level="INFO") # >>> stanza_doc = stanza_wrapper.parse(get_example_text("cop")) >>> stanza_wrapper = StanzaWrapper(language="lzh", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("lzh")) >>> stanza_wrapper = StanzaWrapper(language="lat", treebank="xxx", stanza_debug_level="INFO") Traceback 
(most recent call last): ... cltkv1.core.exceptions.UnimplementedAlgorithmError: Invalid treebank 'xxx' for language 'lat'. """ self.language = language self.treebank = treebank self.stanza_debug_level = stanza_debug_level # Setup language self.map_langs_cltk_stanza = { "chu": "Old_Church_Slavonic", "cop": "Coptic", "fro": "Old_French", "grc": "Ancient_Greek", "got": "Gothic", "lat": "Latin", "lzh": "Classical_Chinese", } self.wrapper_available = self.is_wrapper_available() # type: bool if not self.wrapper_available: raise UnknownLanguageError( "Language '{}' either not in scope for CLTK or not supported by Stanza.".format( self.language ) ) self.stanza_code = self._get_stanza_code() # Setup optional treebank if specified # TODO: Write tests for all treebanks self.map_code_treebanks = dict( grc=["proiel", "perseus"], la=["perseus", "proiel", "ittb"] ) # if not specified, will use the default treebank chosen by stanza if self.treebank: valid_treebank = self._is_valid_treebank() if not valid_treebank: raise UnimplementedAlgorithmError( f"Invalid treebank '{self.treebank}' for language '{self.language}'." ) else: self.treebank = self._get_default_treebank() # check if model present # this fp is just to confirm that some model has already been downloaded. # TODO: This is a weak check for the models actually being downloaded and valid # TODO: Use ``models_dir`` var from below and make self. or global to module self.model_path = os.path.expanduser( f"~/stanza_resources/{self.stanza_code}/tokenize/{self.treebank}.pt" ) if not self._is_model_present(): # download model if necessary self._download_model() # instantiate actual stanza class # Note: `suppress_stdout` is used to prevent `stanza` # from printing a long log of its parameters to screen. # Though we should capture these, within `_load_pipeline()`, # for the log file. with suppress_stdout(): self.nlp = self._load_pipeline()
def __init__( self, language: str, treebank: Optional[str] = None, stanza_debug_level="ERROR" ) -> None: """Constructor for ``get_stanza_models`` wrapper class. TODO: Do tests for all langs and available models for each >>> stanza_wrapper = StanzaWrapper(language='grc', stanza_debug_level="INFO") >>> isinstance(stanza_wrapper, StanzaWrapper) True >>> stanza_wrapper.language 'grc' >>> stanza_wrapper.treebank 'proiel' >>> stanza_wrapper = StanzaWrapper(language="grc", treebank="perseus", stanza_debug_level="INFO") >>> isinstance(stanza_wrapper, StanzaWrapper) True >>> stanza_wrapper.language 'grc' >>> stanza_wrapper.treebank 'perseus' >>> from cltkv1.languages.example_texts import get_example_text >>> stanza_doc = stanza_wrapper.parse(get_example_text("grc")) >>> StanzaWrapper(language="xxx", stanza_debug_level="INFO") Traceback (most recent call last): ... cltkv1.core.exceptions.UnknownLanguageError: Language 'xxx' either not in scope for CLTK or not supported by Stanza. >>> stanza_wrapper = StanzaWrapper(language="grc", treebank="proiel", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("grc")) >>> stanza_wrapper = StanzaWrapper(language="lat", treebank="perseus", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("lat")) >>> stanza_wrapper = StanzaWrapper(language="lat", treebank="proiel", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("lat")) >>> stanza_wrapper = StanzaWrapper(language="chu", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("chu")) >>> stanza_wrapper = StanzaWrapper(language="cop", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("cop")) >>> stanza_wrapper = StanzaWrapper(language="lzh", stanza_debug_level="INFO") >>> stanza_doc = stanza_wrapper.parse(get_example_text("lzh")) >>> stanza_wrapper = StanzaWrapper(language="lat", treebank="xxx", stanza_debug_level="INFO") Traceback (most 
recent call last): ... cltkv1.core.exceptions.UnimplementedAlgorithmError: Invalid treebank 'xxx' for language 'lat'. """ self.language = language self.treebank = treebank self.stanza_debug_level = stanza_debug_level # Setup language self.map_langs_cltk_stanza = { "chu": "Old_Church_Slavonic", "cop": "Coptic", "fro": "Old_French", "grc": "Ancient_Greek", "got": "Gothic", "lat": "Latin", "lzh": "Classical_Chinese", } self.wrapper_available = self.is_wrapper_available() # type: bool if not self.wrapper_available: raise UnknownLanguageError( "Language '{}' either not in scope for CLTK or not supported by Stanza.".format( self.language ) ) self.stanza_code = self._get_stanza_code() # Setup optional treebank if specified # TODO: Write tests for all treebanks self.map_code_treebanks = dict( grc=["proiel", "perseus"], la=["perseus", "proiel", "ittb"] ) # if not specified, will use the default treebank chosen by stanza if self.treebank: valid_treebank = self._is_valid_treebank() if not valid_treebank: raise UnimplementedAlgorithmError( f"Invalid treebank '{self.treebank}' for language '{self.language}'." ) else: self.treebank = self._get_default_treebank() # check if model present # this fp is just to confirm that some model has already been downloaded. # TODO: This is a weak check for the models actually being downloaded and valid # TODO: Use ``models_dir`` var from below and make self. or global to module self.model_path = os.path.expanduser( f"~/stanza_resources/{self.stanza_code}/tokenize/{self.treebank}.pt" ) if not self._is_model_present(): # download model if necessary self._download_model() # instantiate actual stanza class # Note: `suppress_stdout` is used to prevent `stanza` # from printing a long log of its parameters to screen. # Though we should capture these, within `_load_pipeline()`, # for the log file. with suppress_stdout(): self.nlp = self._load_pipeline()
https://github.com/cltk/cltk/issues/67
In [1]: from cltk.corpus.utils.importer import CorpusImporter In [2]: corpus_importer = CorpusImporter('latin') In [3]: corpus_importer. corpus_importer.import_corpus corpus_importer.list_corpora corpus_importer.language In [3]: corpus_importer. corpus_importer.import_corpus corpus_importer.list_corpora corpus_importer.language In [3]: corpus_importer.list_corpora Out[3]: ['latin_text_perseus', 'latin_treebank_perseus', 'latin_text_lacus_curtius', 'latin_text_latin_library', 'phi5', 'phi7', 'latin_proper_names', 'cltk_linguistic_data', 'latin_pos'] In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PH/') /Users/kyle/Downloads/corpora/PHI5/ /Users/kyle/Downloads/corpora/PHI7/ In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI/') /Users/kyle/Downloads/corpora/PHI5/ /Users/kyle/Downloads/corpora/PHI7/ In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI/') /Users/kyle/Downloads/corpora/PHI5/ /Users/kyle/Downloads/corpora/PHI7/ In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI5/') --- Logging error --- Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 971, in emit msg = self.format(record) File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 821, in format return fmt.format(record) File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 558, in format record.message = record.getMessage() File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 321, in getMessage msg = msg % self.args TypeError: not all arguments converted during string formatting Call stack: File "/Users/kyle/Downloads/cltk/venv/bin/ipython", line 11, in <module> sys.exit(start_ipython()) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/__init__.py", line 120, 
in start_ipython return launch_new_instance(argv=argv, **kwargs) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/config/application.py", line 564, in launch_instance app.start() File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/terminal/ipapp.py", line 371, in start self.shell.mainloop() File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/terminal/interactiveshell.py", line 443, in mainloop self.interact(display_banner=display_banner) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/terminal/interactiveshell.py", line 567, in interact self.run_cell(source_raw, store_history=True) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2741, in run_cell interactivity=interactivity, compiler=compiler) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2833, in run_ast_nodes if self.run_code(code): File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2883, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-4-b2bc3206f5ea>", line 1, in <module> corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI5/') File "/Users/kyle/Downloads/cltk/cltk/corpus/utils/importer.py", line 188, in import_corpus logger.info('Incoming path:', path) Message: 'Incoming path:' Arguments: ('/Users/kyle/Downloads/corpora/PHI5/',)
TypeError
def analyze(self, text: str) -> Doc:
    """The primary method for the NLP object, to which raw text strings are passed.

    Args:
        text: Raw text string to be run through the configured pipeline.

    Returns:
        Doc: A ``Doc`` populated by every process in ``self.pipeline.processes``.

    >>> from cltkv1 import NLP
    >>> from cltkv1.languages.example_texts import get_example_text
    >>> from cltkv1.core.data_types import Doc
    >>> cltk_nlp = NLP(language="lat")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("lat"))
    >>> isinstance(cltk_doc, Doc)
    True
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='Gallia', pos='NOUN', lemma='mallis', scansion=None, xpos='A1|grn1|casA|gen2', upos='NOUN', dependency_relation='nsubj', governor=3, features={'Case': 'Nom', 'Degree': 'Pos', 'Gender': 'Fem', 'Number': 'Sing'}, embedding=..., stop=False, named_entity=True)
    >>> from cltkv1.languages.example_texts import get_example_text
    >>> cltk_nlp = NLP(language="grc")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("grc"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='ὅτι', pos='ADV', lemma='ὅτι', scansion=None, xpos='Df', upos='ADV', dependency_relation='advmod', governor=6, features={}, embedding=..., stop=True, named_entity=False)
    >>> cltk_nlp = NLP(language="chu")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("chu"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='отьчє', pos='NOUN', lemma='отьць', scansion=None, xpos='Nb', upos='NOUN', dependency_relation='vocative', governor=7, features={'Case': 'Voc', 'Gender': 'Masc', 'Number': 'Sing'}, embedding=None, stop=None, named_entity=None)
    >>> cltk_nlp = NLP(language="fro")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("fro"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='Une', pos='DET', lemma=None, scansion=None, xpos='DETndf', upos='DET', dependency_relation=None, governor=-1, features={'Definite': 'Ind', 'PronType': 'Art'}, embedding=None, stop=False, named_entity=False)
    >>> cltk_nlp = NLP(language="got")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("got"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='swa', pos='ADV', lemma='swa', scansion=None, xpos='Df', upos='ADV', dependency_relation='advmod', governor=1, features={}, embedding=..., stop=None, named_entity=None)
    >>> len(cltk_doc.sentences)
    3

    # NOTE(review): the Coptic ("cop") doctest below is deliberately disabled
    # (kept as plain text, not executed) — presumably pending model support;
    # confirm before re-enabling.
    # >>> cltk_nlp = NLP(language="cop")
    # >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("cop"))
    # >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    # Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='ⲧⲏⲛ', pos='VERB', lemma='ⲧⲏⲛ', scansion=None, xpos='VSTAT', upos='VERB', dependency_relation='root', governor=-1, features={'VerbForm': 'Fin'}, embedding=None, stop=None, named_entity=None)

    >>> cltk_nlp = NLP(language="lzh")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("lzh"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='黃', pos='NOUN', lemma='黃', scansion=None, xpos='n,名詞,描写,形質', upos='NOUN', dependency_relation='nmod', governor=1, features={}, embedding=None, stop=None, named_entity=None)
    """
    # Seed the pipeline with a bare Doc carrying only the raw input text.
    doc = Doc(language=self.language.iso_639_3_code, raw=text)
    # Each process consumes the Doc emitted by the previous stage and produces
    # an enriched Doc, so the stages run strictly in pipeline order.
    for process in self.pipeline.processes:
        a_process = process(input_doc=doc, language=self.language.iso_639_3_code)
        a_process.run()
        doc = a_process.output_doc
    return doc
def analyze(self, text: str) -> Doc:
    """The primary method for the NLP object, to which raw text strings are passed.

    Args:
        text: Raw text string to be run through the configured pipeline.

    Returns:
        Doc: A ``Doc`` populated by every process in ``self.pipeline.processes``.

    >>> from cltkv1 import NLP
    >>> from cltkv1.languages.example_texts import get_example_text
    >>> from cltkv1.core.data_types import Doc
    >>> cltk_nlp = NLP(language="lat")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("lat"))
    >>> isinstance(cltk_doc, Doc)
    True
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='Gallia', pos='NOUN', lemma='mallis', scansion=None, xpos='A1|grn1|casA|gen2', upos='NOUN', dependency_relation='nsubj', governor=3, features={'Case': 'Nom', 'Degree': 'Pos', 'Gender': 'Fem', 'Number': 'Sing'}, embedding=..., stop=False, named_entity=True)
    >>> from cltkv1.languages.example_texts import get_example_text
    >>> cltk_nlp = NLP(language="grc")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("grc"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='ὅτι', pos='ADV', lemma='ὅτι', scansion=None, xpos='Df', upos='ADV', dependency_relation='advmod', governor=6, features={}, embedding=..., stop=True, named_entity=False)
    >>> cltk_nlp = NLP(language="chu")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("chu"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='отьчє', pos='NOUN', lemma='отьць', scansion=None, xpos='Nb', upos='NOUN', dependency_relation='vocative', governor=7, features={'Case': 'Voc', 'Gender': 'Masc', 'Number': 'Sing'}, embedding=None, stop=None, named_entity=None)
    >>> cltk_nlp = NLP(language="fro")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("fro"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='Une', pos='DET', lemma=None, scansion=None, xpos='DETndf', upos='DET', dependency_relation=None, governor=-1, features={'Definite': 'Ind', 'PronType': 'Art'}, embedding=None, stop=False, named_entity=False)
    >>> cltk_nlp = NLP(language="got")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("got"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='swa', pos='ADV', lemma='swa', scansion=None, xpos='Df', upos='ADV', dependency_relation='advmod', governor=1, features={}, embedding=..., stop=None, named_entity=None)
    >>> len(cltk_doc.sentences)
    3
    >>> cltk_nlp = NLP(language="cop")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("cop"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='ⲧⲏⲛ', pos='VERB', lemma='ⲧⲏⲛ', scansion=None, xpos='VSTAT', upos='VERB', dependency_relation='root', governor=-1, features={'VerbForm': 'Fin'}, embedding=None, stop=None, named_entity=None)
    >>> cltk_nlp = NLP(language="lzh")
    >>> cltk_doc = cltk_nlp.analyze(text=get_example_text("lzh"))
    >>> cltk_doc.words[0] # doctest: +ELLIPSIS
    Word(index_char_start=None, index_char_stop=None, index_token=0, index_sentence=0, string='黃', pos='NOUN', lemma='黃', scansion=None, xpos='n,名詞,描写,形質', upos='NOUN', dependency_relation='nmod', governor=1, features={}, embedding=None, stop=None, named_entity=None)
    """
    # Seed the pipeline with a bare Doc carrying only the raw input text.
    doc = Doc(language=self.language.iso_639_3_code, raw=text)
    # Each process consumes the Doc emitted by the previous stage and produces
    # an enriched Doc, so the stages run strictly in pipeline order.
    for process in self.pipeline.processes:
        a_process = process(input_doc=doc, language=self.language.iso_639_3_code)
        a_process.run()
        doc = a_process.output_doc
    return doc
https://github.com/cltk/cltk/issues/67
In [1]: from cltk.corpus.utils.importer import CorpusImporter In [2]: corpus_importer = CorpusImporter('latin') In [3]: corpus_importer. corpus_importer.import_corpus corpus_importer.list_corpora corpus_importer.language In [3]: corpus_importer. corpus_importer.import_corpus corpus_importer.list_corpora corpus_importer.language In [3]: corpus_importer.list_corpora Out[3]: ['latin_text_perseus', 'latin_treebank_perseus', 'latin_text_lacus_curtius', 'latin_text_latin_library', 'phi5', 'phi7', 'latin_proper_names', 'cltk_linguistic_data', 'latin_pos'] In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PH/') /Users/kyle/Downloads/corpora/PHI5/ /Users/kyle/Downloads/corpora/PHI7/ In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI/') /Users/kyle/Downloads/corpora/PHI5/ /Users/kyle/Downloads/corpora/PHI7/ In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI/') /Users/kyle/Downloads/corpora/PHI5/ /Users/kyle/Downloads/corpora/PHI7/ In [4]: corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI5/') --- Logging error --- Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 971, in emit msg = self.format(record) File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 821, in format return fmt.format(record) File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 558, in format record.message = record.getMessage() File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/logging/__init__.py", line 321, in getMessage msg = msg % self.args TypeError: not all arguments converted during string formatting Call stack: File "/Users/kyle/Downloads/cltk/venv/bin/ipython", line 11, in <module> sys.exit(start_ipython()) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/__init__.py", line 120, 
in start_ipython return launch_new_instance(argv=argv, **kwargs) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/config/application.py", line 564, in launch_instance app.start() File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/terminal/ipapp.py", line 371, in start self.shell.mainloop() File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/terminal/interactiveshell.py", line 443, in mainloop self.interact(display_banner=display_banner) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/terminal/interactiveshell.py", line 567, in interact self.run_cell(source_raw, store_history=True) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2741, in run_cell interactivity=interactivity, compiler=compiler) File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2833, in run_ast_nodes if self.run_code(code): File "/Users/kyle/Downloads/cltk/venv/lib/python3.4/site-packages/IPython/core/interactiveshell.py", line 2883, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-4-b2bc3206f5ea>", line 1, in <module> corpus_importer.import_corpus('phi5', '/Users/kyle/Downloads/corpora/PHI5/') File "/Users/kyle/Downloads/cltk/cltk/corpus/utils/importer.py", line 188, in import_corpus logger.info('Incoming path:', path) Message: 'Incoming path:' Arguments: ('/Users/kyle/Downloads/corpora/PHI5/',)
TypeError
def handle(self, *args, **options): """Create Customer objects for Subscribers without Customer objects associated.""" for subscriber in get_subscriber_model().objects.filter(djstripe_customers=None): # use get_or_create in case of race conditions on large subscriber bases Customer.get_or_create(subscriber=subscriber) print("Created subscriber for {0}".format(subscriber.email))
def handle(self, *args, **options): """Create Customer objects for Subscribers without Customer objects associated.""" for subscriber in get_subscriber_model().objects.filter(customer__isnull=True): # use get_or_create in case of race conditions on large subscriber bases Customer.get_or_create(subscriber=subscriber) print("Created subscriber for {0}".format(subscriber.email))
https://github.com/dj-stripe/dj-stripe/issues/497
Traceback (most recent call last): File "manage.py", line 22, in <module> execute_from_command_line(sys.argv) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py", line 363, in execute_from_command_line utility.execute() File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py", line 355, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv self.execute(*args, **cmd_options) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/jdln/temp/dj-stripe/djstripe/management/commands/djstripe_init_customers.py", line 25, in handle for subscriber in get_subscriber_model().objects.filter(customer__isnull=True): File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/manager.py", line 85, in manager_method return getattr(self.get_queryset(), name)(*args, **kwargs) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py", line 781, in filter return self._filter_or_exclude(False, *args, **kwargs) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py", line 799, in _filter_or_exclude clone.query.add_q(Q(*args, **kwargs)) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1260, in add_q clause, _ = self._add_q(q_object, self.used_aliases) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1286, in _add_q allow_joins=allow_joins, split_subq=split_subq, File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1164, in build_filter lookups, parts, reffed_expression = 
self.solve_lookup_type(arg) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1044, in solve_lookup_type _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1362, in names_to_path "Choices are: %s" % (name, ", ".join(available))) django.core.exceptions.FieldError: Cannot resolve keyword 'customer' into field. Choices are: date_joined, djstripe_customers, email, first_name, groups, id, is_active, is_staff, is_superuser, last_login, last_name, logentry, password, user_permissions, username
django.core.exceptions.FieldError
def api_retrieve(self, api_key=settings.STRIPE_SECRET_KEY): # OVERRIDING the parent version of this function # Cards must be manipulated through a customer or account. # TODO: When managed accounts are supported, this method needs to check if # either a customer or account is supplied to determine the correct object to use. customer = self.customer.api_retrieve(api_key=api_key) # If the customer is deleted, the sources attribute will be absent. # eg. {"id": "cus_XXXXXXXX", "deleted": True} if "sources" not in customer: # We fake a native stripe InvalidRequestError so that it's caught like an invalid ID error. raise InvalidRequestError("No such source: %s" % (self.stripe_id), "id") return customer.sources.retrieve(self.stripe_id, expand=self.expand_fields)
def api_retrieve(self, api_key=settings.STRIPE_SECRET_KEY): # OVERRIDING the parent version of this function # Cards must be manipulated through a customer or account. # TODO: When managed accounts are supported, this method needs to check if either a customer or # account is supplied to determine the correct object to use. return self.customer.api_retrieve(api_key=api_key).sources.retrieve( self.stripe_id, expand=self.expand_fields )
https://github.com/dj-stripe/dj-stripe/issues/432
customer = Customer.retrieve(id="cus_ACOAhvtRVLJTDT", api_key=settings.STRIPE_SECRET_KEY) customer <Customer id=cus_ACOAhvtRVLJTDT at 0x7fd435fffae8> JSON: { "deleted": true, "id": "cus_ACOAhvtRVLJTDT" } "sources" in customer False customer.sources.retrieve(...) Traceback (most recent call last): File "<console>", line 1, in <module> File "/home/vagrant/env/lib/python3.4/site-packages/stripe/resource.py", line 135, in __getattr__ raise AttributeError(*err.args) AttributeError: sources
AttributeError
def parser_one_line(self, line): """Parse one string line into feature values. Args: line (str): a string indicating one instance Returns: list: Parsed results,including label, features and impression_id """ impression_id = 0 words = line.strip().split(self.ID_spliter) if len(words) == 2: impression_id = words[1].strip() cols = words[0].strip().split(self.col_spliter) label = float(cols[0]) features = [] for word in cols[1:]: if not word.strip(): continue tokens = word.split(":") features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) return label, features, impression_id
def parser_one_line(self, line): """Parse one string line into feature values. Args: line (str): a string indicating one instance Returns: list: Parsed results,including label, features and impression_id """ impression_id = None words = line.strip().split(self.ID_spliter) if len(words) == 2: impression_id = words[1].strip() cols = words[0].strip().split(self.col_spliter) label = float(cols[0]) features = [] for word in cols[1:]: if not word.strip(): continue tokens = word.split(":") features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) return label, features, impression_id
https://github.com/microsoft/recommenders/issues/1132
=========================================================================== test session starts =========================================================================== platform linux -- Python 3.6.7, pytest-4.1.1, py-1.7.0, pluggy-0.8.1 rootdir: /home/miguel/repos/Recommenders, inifile: collected 1 item tests/smoke/test_notebooks_gpu.py F [100%] ================================================================================ FAILURES ================================================================================= _____________________________________________________________________________ test_dkn_smoke ______________________________________________________________________________ notebooks = {'als_deep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/als_deep_dive.ipynb', 'al...eep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb', ...} @pytest.mark.smoke @pytest.mark.gpu def test_dkn_smoke(notebooks): notebook_path = notebooks["dkn_quickstart"] pm.execute_notebook( notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(epoch=1), ) tests/smoke/test_notebooks_gpu.py:118: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:78: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...end_time': '2020-07-02T08:05:40.865564', 'duration': 9.904528, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def 
raise_for_execution_errors(nb, output_path): error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [7]": E --------------------------------------------------------------------------- E NotFoundError Traceback (most recent call last) E <ipython-input-7-607bbc4e411e> in <module> E ----> 1 model = DKN(hparams, input_creator) E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/dkn.py in __init__(self, hparams, iterator_creator) E 73 self.context_embedding.assign(c_embedding_transformed) E 74 E ---> 75 super().__init__(hparams, iterator_creator, graph=self.graph) E 76 E 77 def _init_embedding(self, file_path): E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/base_model.py in __init__(self, hparams, iterator_creator, graph, seed) E 30 E 31 self.graph = graph if graph is not None else tf.Graph() E ---> 32 self.iterator = iterator_creator(hparams, self.graph) E 33 self.train_num_ngs = ( E 34 hparams.train_num_ngs if "train_num_ngs" in hparams else None E E ~/repos/Recommenders/reco_utils/recommender/deeprec/io/dkn_iterator.py in __init__(self, hparams, graph, col_spliter, ID_spliter) E 67 with 
tf.gfile.GFile(hparams.news_feature_file, "r") as rd: E 68 while True: E ---> 69 line = rd.readline() E 70 if not line: E 71 break E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in readline(self) E 181 def readline(self): E 182 r"""Reads the next line from the file. Leaves the '\n' at the end.""" E --> 183 self._preread_check() E 184 return self._prepare_value(self._read_buf.ReadLineAsString()) E 185 E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in _preread_check(self) E 83 with errors.raise_exception_on_not_ok_status() as status: E 84 self._read_buf = pywrap_tensorflow.CreateBufferedInputStream( E ---> 85 compat.as_bytes(self.__name), 1024 * 512, status) E 86 E 87 def _prewrite_check(self): E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) E 526 None, None, E 527 compat.as_text(c_api.TF_Message(self.status.status)), E --> 528 c_api.TF_GetCode(self.status.status)) E 529 # Delete the underlying status object from memory otherwise it stays alive E 530 # as there is a reference to status from this from the traceback due to E E NotFoundError: ../../tests/resources/deeprec/dkn/doc_feature.txt; No such file or directory ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:198: PapermillExecutionError -------------------------------------------------------------------------- Captured stderr call --------------------------------------------------------------------------- 62%|██████▏ | 13/21 [00:08<00:06, 1.23it/s] ============================================================================ warnings summary ============================================================================= /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28 
/home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28: DeprecationWarning: `nbconvert.exporters.exporter_locator` is deprecated in favor of `nbconvert.exporters.base` since nbconvert 5.0. DeprecationWarning) /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747 /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747: DeprecationWarning: @asynchronous is deprecated, use coroutines instead DeprecationWarning) tests/smoke/test_notebooks_gpu.py::test_dkn_smoke /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/jupyter_client/session.py:371: DeprecationWarning: Session._key_changed is deprecated in traitlets 4.1: use @observe and @unobserve instead. def _key_changed(self): -- Docs: https://docs.pytest.org/en/latest/warnings.html ================================================================== 1 failed, 3 warnings in 11.56 seconds ==================================================================
PapermillExecutionError
def load_data_from_file(self, infile): """Read and parse data from a file. Args: infile (str): text input file. Each line in this file is an instance. Returns: obj: An iterator that will yields parsed results, in the format of graph feed_dict. """ label_list = [] features_list = [] impression_id_list = [] cnt = 0 with tf.gfile.GFile(infile, "r") as rd: while True: line = rd.readline() if not line: break label, features, impression_id = self.parser_one_line(line) features_list.append(features) label_list.append(label) impression_id_list.append(impression_id) cnt += 1 if cnt == self.batch_size: res = self._convert_data(label_list, features_list) yield self.gen_feed_dict(res), impression_id_list, self.batch_size label_list = [] features_list = [] impression_id_list = [] cnt = 0 if cnt > 0: res = self._convert_data(label_list, features_list) yield self.gen_feed_dict(res), impression_id_list, cnt
def load_data_from_file(self, infile): """Read and parse data from a file. Args: infile (str): text input file. Each line in this file is an instance. Returns: obj: An iterator that will yields parsed results, in the format of graph feed_dict. """ label_list = [] features_list = [] impression_id_list = [] cnt = 0 with tf.gfile.GFile(infile, "r") as rd: while True: line = rd.readline() if not line: break label, features, impression_id = self.parser_one_line(line) features_list.append(features) label_list.append(label) impression_id_list.append(impression_id) cnt += 1 if cnt == self.batch_size: res = self._convert_data(label_list, features_list) yield self.gen_feed_dict(res) label_list = [] features_list = [] impression_id_list = [] cnt = 0 if cnt > 0: res = self._convert_data(label_list, features_list) yield self.gen_feed_dict(res)
https://github.com/microsoft/recommenders/issues/1132
=========================================================================== test session starts =========================================================================== platform linux -- Python 3.6.7, pytest-4.1.1, py-1.7.0, pluggy-0.8.1 rootdir: /home/miguel/repos/Recommenders, inifile: collected 1 item tests/smoke/test_notebooks_gpu.py F [100%] ================================================================================ FAILURES ================================================================================= _____________________________________________________________________________ test_dkn_smoke ______________________________________________________________________________ notebooks = {'als_deep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/als_deep_dive.ipynb', 'al...eep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb', ...} @pytest.mark.smoke @pytest.mark.gpu def test_dkn_smoke(notebooks): notebook_path = notebooks["dkn_quickstart"] pm.execute_notebook( notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(epoch=1), ) tests/smoke/test_notebooks_gpu.py:118: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:78: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...end_time': '2020-07-02T08:05:40.865564', 'duration': 9.904528, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def 
raise_for_execution_errors(nb, output_path): error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [7]": E --------------------------------------------------------------------------- E NotFoundError Traceback (most recent call last) E <ipython-input-7-607bbc4e411e> in <module> E ----> 1 model = DKN(hparams, input_creator) E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/dkn.py in __init__(self, hparams, iterator_creator) E 73 self.context_embedding.assign(c_embedding_transformed) E 74 E ---> 75 super().__init__(hparams, iterator_creator, graph=self.graph) E 76 E 77 def _init_embedding(self, file_path): E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/base_model.py in __init__(self, hparams, iterator_creator, graph, seed) E 30 E 31 self.graph = graph if graph is not None else tf.Graph() E ---> 32 self.iterator = iterator_creator(hparams, self.graph) E 33 self.train_num_ngs = ( E 34 hparams.train_num_ngs if "train_num_ngs" in hparams else None E E ~/repos/Recommenders/reco_utils/recommender/deeprec/io/dkn_iterator.py in __init__(self, hparams, graph, col_spliter, ID_spliter) E 67 with 
tf.gfile.GFile(hparams.news_feature_file, "r") as rd: E 68 while True: E ---> 69 line = rd.readline() E 70 if not line: E 71 break E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in readline(self) E 181 def readline(self): E 182 r"""Reads the next line from the file. Leaves the '\n' at the end.""" E --> 183 self._preread_check() E 184 return self._prepare_value(self._read_buf.ReadLineAsString()) E 185 E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in _preread_check(self) E 83 with errors.raise_exception_on_not_ok_status() as status: E 84 self._read_buf = pywrap_tensorflow.CreateBufferedInputStream( E ---> 85 compat.as_bytes(self.__name), 1024 * 512, status) E 86 E 87 def _prewrite_check(self): E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) E 526 None, None, E 527 compat.as_text(c_api.TF_Message(self.status.status)), E --> 528 c_api.TF_GetCode(self.status.status)) E 529 # Delete the underlying status object from memory otherwise it stays alive E 530 # as there is a reference to status from this from the traceback due to E E NotFoundError: ../../tests/resources/deeprec/dkn/doc_feature.txt; No such file or directory ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:198: PapermillExecutionError -------------------------------------------------------------------------- Captured stderr call --------------------------------------------------------------------------- 62%|██████▏ | 13/21 [00:08<00:06, 1.23it/s] ============================================================================ warnings summary ============================================================================= /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28 
/home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28: DeprecationWarning: `nbconvert.exporters.exporter_locator` is deprecated in favor of `nbconvert.exporters.base` since nbconvert 5.0. DeprecationWarning) /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747 /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747: DeprecationWarning: @asynchronous is deprecated, use coroutines instead DeprecationWarning) tests/smoke/test_notebooks_gpu.py::test_dkn_smoke /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/jupyter_client/session.py:371: DeprecationWarning: Session._key_changed is deprecated in traitlets 4.1: use @observe and @unobserve instead. def _key_changed(self): -- Docs: https://docs.pytest.org/en/latest/warnings.html ================================================================== 1 failed, 3 warnings in 11.56 seconds ==================================================================
PapermillExecutionError
def run_eval(self, filename):
    """Evaluate the model on the given file and return evaluation metrics.

    Args:
        filename (str): Name of the file to be evaluated.

    Returns:
        dict: A dictionary that contains the configured evaluation metrics
            (plus pairwise metrics when ``hparams.pairwise_metrics`` is set).
    """
    load_sess = self.sess
    preds = []
    labels = []
    imp_indexs = []
    for batch_data_input, imp_index, data_size in self.iterator.load_data_from_file(
        filename
    ):
        step_pred, step_labels = self.eval(load_sess, batch_data_input)
        # Flatten each batch before accumulating so metrics see 1-D lists.
        preds.extend(np.reshape(step_pred, -1))
        labels.extend(np.reshape(step_labels, -1))
        imp_indexs.extend(np.reshape(imp_index, -1))
    res = cal_metric(labels, preds, self.hparams.metrics)
    # Pairwise (per-impression-group) metrics are optional.  PEP 8: compare
    # against the None singleton with ``is not None``, not ``!= None``.
    if self.hparams.pairwise_metrics is not None:
        group_labels, group_preds = self.group_labels(labels, preds, imp_indexs)
        res_pairwise = cal_metric(
            group_labels, group_preds, self.hparams.pairwise_metrics
        )
        res.update(res_pairwise)
    return res
def run_eval(self, filename):
    """Evaluate the model on the given file and return evaluation metrics.

    Args:
        filename (str): Name of the file to be evaluated.

    Returns:
        dict: A dictionary that contains the configured evaluation metrics
            (plus pairwise metrics when ``hparams.pairwise_metrics`` is set).
    """
    load_sess = self.sess
    preds = []
    labels = []
    imp_indexs = []
    for batch_data_input, imp_index, data_size in self.iterator.load_data_from_file(
        filename
    ):
        step_pred, step_labels = self.eval(load_sess, batch_data_input)
        # Flatten each batch before accumulating so metrics see 1-D lists.
        preds.extend(np.reshape(step_pred, -1))
        labels.extend(np.reshape(step_labels, -1))
        imp_indexs.extend(np.reshape(imp_index, -1))
    res = cal_metric(labels, preds, self.hparams.metrics)
    # Bug fix: pairwise metrics are optional; the previous code computed them
    # unconditionally and crashed when hparams.pairwise_metrics was None.
    if self.hparams.pairwise_metrics is not None:
        group_labels, group_preds = self.group_labels(labels, preds, imp_indexs)
        res_pairwise = cal_metric(
            group_labels, group_preds, self.hparams.pairwise_metrics
        )
        res.update(res_pairwise)
    return res
https://github.com/microsoft/recommenders/issues/1132
=========================================================================== test session starts =========================================================================== platform linux -- Python 3.6.7, pytest-4.1.1, py-1.7.0, pluggy-0.8.1 rootdir: /home/miguel/repos/Recommenders, inifile: collected 1 item tests/smoke/test_notebooks_gpu.py F [100%] ================================================================================ FAILURES ================================================================================= _____________________________________________________________________________ test_dkn_smoke ______________________________________________________________________________ notebooks = {'als_deep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/als_deep_dive.ipynb', 'al...eep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb', ...} @pytest.mark.smoke @pytest.mark.gpu def test_dkn_smoke(notebooks): notebook_path = notebooks["dkn_quickstart"] pm.execute_notebook( notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(epoch=1), ) tests/smoke/test_notebooks_gpu.py:118: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:78: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...end_time': '2020-07-02T08:05:40.865564', 'duration': 9.904528, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def 
raise_for_execution_errors(nb, output_path): error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [7]": E --------------------------------------------------------------------------- E NotFoundError Traceback (most recent call last) E <ipython-input-7-607bbc4e411e> in <module> E ----> 1 model = DKN(hparams, input_creator) E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/dkn.py in __init__(self, hparams, iterator_creator) E 73 self.context_embedding.assign(c_embedding_transformed) E 74 E ---> 75 super().__init__(hparams, iterator_creator, graph=self.graph) E 76 E 77 def _init_embedding(self, file_path): E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/base_model.py in __init__(self, hparams, iterator_creator, graph, seed) E 30 E 31 self.graph = graph if graph is not None else tf.Graph() E ---> 32 self.iterator = iterator_creator(hparams, self.graph) E 33 self.train_num_ngs = ( E 34 hparams.train_num_ngs if "train_num_ngs" in hparams else None E E ~/repos/Recommenders/reco_utils/recommender/deeprec/io/dkn_iterator.py in __init__(self, hparams, graph, col_spliter, ID_spliter) E 67 with 
tf.gfile.GFile(hparams.news_feature_file, "r") as rd: E 68 while True: E ---> 69 line = rd.readline() E 70 if not line: E 71 break E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in readline(self) E 181 def readline(self): E 182 r"""Reads the next line from the file. Leaves the '\n' at the end.""" E --> 183 self._preread_check() E 184 return self._prepare_value(self._read_buf.ReadLineAsString()) E 185 E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in _preread_check(self) E 83 with errors.raise_exception_on_not_ok_status() as status: E 84 self._read_buf = pywrap_tensorflow.CreateBufferedInputStream( E ---> 85 compat.as_bytes(self.__name), 1024 * 512, status) E 86 E 87 def _prewrite_check(self): E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) E 526 None, None, E 527 compat.as_text(c_api.TF_Message(self.status.status)), E --> 528 c_api.TF_GetCode(self.status.status)) E 529 # Delete the underlying status object from memory otherwise it stays alive E 530 # as there is a reference to status from this from the traceback due to E E NotFoundError: ../../tests/resources/deeprec/dkn/doc_feature.txt; No such file or directory ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:198: PapermillExecutionError -------------------------------------------------------------------------- Captured stderr call --------------------------------------------------------------------------- 62%|██████▏ | 13/21 [00:08<00:06, 1.23it/s] ============================================================================ warnings summary ============================================================================= /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28 
/home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28: DeprecationWarning: `nbconvert.exporters.exporter_locator` is deprecated in favor of `nbconvert.exporters.base` since nbconvert 5.0. DeprecationWarning) /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747 /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747: DeprecationWarning: @asynchronous is deprecated, use coroutines instead DeprecationWarning) tests/smoke/test_notebooks_gpu.py::test_dkn_smoke /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/jupyter_client/session.py:371: DeprecationWarning: Session._key_changed is deprecated in traitlets 4.1: use @observe and @unobserve instead. def _key_changed(self): -- Docs: https://docs.pytest.org/en/latest/warnings.html ================================================================== 1 failed, 3 warnings in 11.56 seconds ==================================================================
PapermillExecutionError
def predict(self, infile_name, outfile_name):
    """Score the records of an input file and write the scores to disk.

    Args:
        infile_name (str): Input file name, format is same as train/val/test file.
        outfile_name (str): Output file name, each line is the predict score.

    Returns:
        obj: An instance of self.
    """
    sess = self.sess
    with tf.gfile.GFile(outfile_name, "w") as out_file:
        # The iterator yields (batch, impression_index, data_size); only the
        # batch itself is needed for inference.
        for batch_input, _, _ in self.iterator.load_data_from_file(infile_name):
            batch_pred = np.reshape(self.infer(sess, batch_input), -1)
            out_file.write("\n".join(str(score) for score in batch_pred))
            # Terminate each batch with a newline so batches do not run together.
            out_file.write("\n")
    return self
def predict(self, infile_name, outfile_name):
    """Make predictions on the given data, and output predicted scores to a file.

    Args:
        infile_name (str): Input file name, format is same as train/val/test file.
        outfile_name (str): Output file name, each line is the predict score.

    Returns:
        obj: An instance of self.
    """
    load_sess = self.sess
    with tf.gfile.GFile(outfile_name, "w") as wt:
        # Bug fix: load_data_from_file yields (batch_data_input,
        # impression_index, data_size) triples (see run_eval); the previous
        # code passed the whole tuple to self.infer.  Unpack and discard the
        # two fields that are not needed here.
        for batch_data_input, _, _ in self.iterator.load_data_from_file(infile_name):
            step_pred = self.infer(load_sess, batch_data_input)
            step_pred = np.reshape(step_pred, -1)
            wt.write("\n".join(map(str, step_pred)))
            # line break after each batch.
            wt.write("\n")
    return self
https://github.com/microsoft/recommenders/issues/1132
=========================================================================== test session starts =========================================================================== platform linux -- Python 3.6.7, pytest-4.1.1, py-1.7.0, pluggy-0.8.1 rootdir: /home/miguel/repos/Recommenders, inifile: collected 1 item tests/smoke/test_notebooks_gpu.py F [100%] ================================================================================ FAILURES ================================================================================= _____________________________________________________________________________ test_dkn_smoke ______________________________________________________________________________ notebooks = {'als_deep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/als_deep_dive.ipynb', 'al...eep_dive': '/home/miguel/repos/Recommenders/examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb', ...} @pytest.mark.smoke @pytest.mark.gpu def test_dkn_smoke(notebooks): notebook_path = notebooks["dkn_quickstart"] pm.execute_notebook( notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(epoch=1), ) tests/smoke/test_notebooks_gpu.py:118: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:78: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...end_time': '2020-07-02T08:05:40.865564', 'duration': 9.904528, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def 
raise_for_execution_errors(nb, output_path): error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [7]": E --------------------------------------------------------------------------- E NotFoundError Traceback (most recent call last) E <ipython-input-7-607bbc4e411e> in <module> E ----> 1 model = DKN(hparams, input_creator) E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/dkn.py in __init__(self, hparams, iterator_creator) E 73 self.context_embedding.assign(c_embedding_transformed) E 74 E ---> 75 super().__init__(hparams, iterator_creator, graph=self.graph) E 76 E 77 def _init_embedding(self, file_path): E E ~/repos/Recommenders/reco_utils/recommender/deeprec/models/base_model.py in __init__(self, hparams, iterator_creator, graph, seed) E 30 E 31 self.graph = graph if graph is not None else tf.Graph() E ---> 32 self.iterator = iterator_creator(hparams, self.graph) E 33 self.train_num_ngs = ( E 34 hparams.train_num_ngs if "train_num_ngs" in hparams else None E E ~/repos/Recommenders/reco_utils/recommender/deeprec/io/dkn_iterator.py in __init__(self, hparams, graph, col_spliter, ID_spliter) E 67 with 
tf.gfile.GFile(hparams.news_feature_file, "r") as rd: E 68 while True: E ---> 69 line = rd.readline() E 70 if not line: E 71 break E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in readline(self) E 181 def readline(self): E 182 r"""Reads the next line from the file. Leaves the '\n' at the end.""" E --> 183 self._preread_check() E 184 return self._prepare_value(self._read_buf.ReadLineAsString()) E 185 E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py in _preread_check(self) E 83 with errors.raise_exception_on_not_ok_status() as status: E 84 self._read_buf = pywrap_tensorflow.CreateBufferedInputStream( E ---> 85 compat.as_bytes(self.__name), 1024 * 512, status) E 86 E 87 def _prewrite_check(self): E E ~/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) E 526 None, None, E 527 compat.as_text(c_api.TF_Message(self.status.status)), E --> 528 c_api.TF_GetCode(self.status.status)) E 529 # Delete the underlying status object from memory otherwise it stays alive E 530 # as there is a reference to status from this from the traceback due to E E NotFoundError: ../../tests/resources/deeprec/dkn/doc_feature.txt; No such file or directory ../../anaconda/envs/reco_gpu/lib/python3.6/site-packages/papermill/execute.py:198: PapermillExecutionError -------------------------------------------------------------------------- Captured stderr call --------------------------------------------------------------------------- 62%|██████▏ | 13/21 [00:08<00:06, 1.23it/s] ============================================================================ warnings summary ============================================================================= /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28 
/home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/nbconvert/exporters/exporter_locator.py:28: DeprecationWarning: `nbconvert.exporters.exporter_locator` is deprecated in favor of `nbconvert.exporters.base` since nbconvert 5.0. DeprecationWarning) /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747 /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/tornado/web.py:1747: DeprecationWarning: @asynchronous is deprecated, use coroutines instead DeprecationWarning) tests/smoke/test_notebooks_gpu.py::test_dkn_smoke /home/miguel/anaconda/envs/reco_gpu/lib/python3.6/site-packages/jupyter_client/session.py:371: DeprecationWarning: Session._key_changed is deprecated in traitlets 4.1: use @observe and @unobserve instead. def _key_changed(self): -- Docs: https://docs.pytest.org/en/latest/warnings.html ================================================================== 1 failed, 3 warnings in 11.56 seconds ==================================================================
PapermillExecutionError
def find_wikidata_id(name, limit=1, session=None):
    """Find the entity ID in wikidata from a title string.

    Args:
        name (str): A string with search terms (eg. "Batman (1989) film")
        limit (int): Number of results to return
        session (requests.Session): requests session to reuse connections

    Returns:
        (str): wikidata entityID corresponding to the title string.
               'entityNotFound' will be returned if no page is found
    """
    session = get_session(session=session)

    # Step 1: full-text search on Wikipedia to resolve the title to a page id.
    params = dict(
        action="query",
        list="search",
        srsearch=bytes(name, encoding="utf8"),
        srlimit=limit,
        srprop="",
        format="json",
    )

    try:
        response = session.get(API_URL_WIKIPEDIA, params=params)
        page_id = response.json()["query"]["search"][0]["pageid"]
    except Exception:
        # TODO: distinguish between connection error and entity not found
        logger.error("ENTITY NOT FOUND")
        return "entityNotFound"

    # Step 2: map the Wikipedia page id to its Wikidata entity id.
    params = dict(
        action="query",
        prop="pageprops",
        ppprop="wikibase_item",
        pageids=[page_id],
        format="json",
    )

    try:
        response = session.get(API_URL_WIKIPEDIA, params=params)
        entity_id = response.json()["query"]["pages"][str(page_id)]["pageprops"][
            "wikibase_item"
        ]
    except Exception:
        # TODO: distinguish between connection error and entity not found
        logger.error("ENTITY NOT FOUND")
        return "entityNotFound"

    return entity_id
def find_wikidata_id(name, limit=1, session=None):
    """Find the entity ID in wikidata from a title string.

    Args:
        name (str): A string with search terms (eg. "Batman (1989) film")
        limit (int): Number of results to return
        session (requests.Session): requests session to reuse connections

    Returns:
        (str): wikidata entityID corresponding to the title string.
               'entityNotFound' will be returned if no page is found
    """
    # Local import keeps this fix self-contained: log failures instead of
    # swallowing them silently (the old commented-out prints hid all errors).
    import logging

    logger = logging.getLogger(__name__)

    session = get_session(session=session)

    # Step 1: full-text search on Wikipedia to resolve the title to a page id.
    params = dict(
        action="query",
        list="search",
        srsearch=bytes(name, encoding="utf8"),
        srlimit=limit,
        srprop="",
        format="json",
    )

    try:
        response = session.get(API_URL_WIKIPEDIA, params=params)
        page_id = response.json()["query"]["search"][0]["pageid"]
    except Exception:
        # TODO: distinguish between connection error and entity not found
        logger.error("ENTITY NOT FOUND")
        return "entityNotFound"

    # Step 2: map the Wikipedia page id to its Wikidata entity id.
    params = dict(
        action="query",
        prop="pageprops",
        ppprop="wikibase_item",
        pageids=[page_id],
        format="json",
    )

    try:
        response = session.get(API_URL_WIKIPEDIA, params=params)
        entity_id = response.json()["query"]["pages"][str(page_id)]["pageprops"][
            "wikibase_item"
        ]
    except Exception:
        # TODO: distinguish between connection error and entity not found
        logger.error("ENTITY NOT FOUND")
        return "entityNotFound"

    return entity_id
https://github.com/microsoft/recommenders/issues/919
============================= test session starts ============================== platform linux -- Python 3.6.8, pytest-5.0.1, py-1.8.0, pluggy-0.12.0 rootdir: /data/home/recocat/cicd/28/s collected 29 items / 12 deselected / 17 selected tests/integration/test_criteo.py . [ 5%] tests/integration/test_movielens.py ......... [ 58%] tests/integration/test_notebooks_python.py ......F [100%] =================================== FAILURES =================================== __________________________ test_wikidata_integration ___________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/28/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/re...aseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/28/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-1697/tmpruv_um4p' @pytest.mark.integration def test_wikidata_integration(notebooks, tmp): notebook_path = notebooks["wikidata_KG"] MOVIELENS_SAMPLE_SIZE = 5 pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE='100k', MOVIELENS_SAMPLE=True, MOVIELENS_SAMPLE_SIZE=MOVIELENS_SAMPLE_SIZE)) tests/integration/test_notebooks_python.py:173: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...nd_time': '2019-09-07T23:48:50.115802', 'duration': 14.995972, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable 
notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [19]": E --------------------------------------------------------------------------- E ImportError Traceback (most recent call last) E /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/tqdm/_tqdm.py in pandas(tclass, *targs, **tkwargs) E 612 # pandas>=0.23.0 E --> 613 from pandas.core.groupby.groupby import DataFrameGroupBy, \ E 614 SeriesGroupBy, GroupBy, PanelGroupBy E E ImportError: cannot import name 'DataFrameGroupBy' E E During handling of the above exception, another exception occurred: E E ImportError Traceback (most recent call last) E <ipython-input-19-6ccb9974139b> in <module> E ----> 1 tqdm().pandas(desc="Number of movies completed") E 2 result = pd.concat(list(movies.progress_apply(lambda x: wikidata_KG_from_movielens(x), axis=1))) E E /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/tqdm/_tqdm.py in pandas(tclass, *targs, **tkwargs) E 614 SeriesGroupBy, GroupBy, PanelGroupBy E 615 except ImportError: E --> 616 from pandas.core.groupby import DataFrameGroupBy, \ E 617 
SeriesGroupBy, GroupBy, PanelGroupBy E 618 E E ImportError: cannot import name 'PanelGroupBy'
PapermillExecutionError
def query_entity_links(entity_id, session=None):
    """Query all linked pages from a wikidata entityID

    Args:
        entity_id (str): A wikidata entity ID
        session (requests.Session): requests session to reuse connections

    Returns:
        (json): dictionary with linked pages.
    """
    # SPARQL: return every (property, value) pair linked to the entity,
    # restricted to English labels and capped at 500 rows.
    query = (
        """
        PREFIX entity: <http://www.wikidata.org/entity/>
        #partial results
        SELECT ?propUrl ?propLabel ?valUrl ?valLabel
        WHERE
        {
            hint:Query hint:optimizer 'None' .
            { BIND(entity:"""
        + entity_id
        + """ AS ?valUrl) .
              BIND("N/A" AS ?propUrl ) .
              BIND("identity"@en AS ?propLabel ) .
            }
            UNION
            { entity:"""
        + entity_id
        + """ ?propUrl ?valUrl .
              ?property ?ref ?propUrl .
              ?property rdf:type wikibase:Property .
              ?property rdfs:label ?propLabel
            }
            ?valUrl rdfs:label ?valLabel
            FILTER (LANG(?valLabel) = 'en') .
            OPTIONAL{ ?valUrl wdt:P18 ?picture .}
            FILTER (lang(?propLabel) = 'en' )
        }
        ORDER BY ?propUrl ?valUrl
        LIMIT 500
        """
    )

    session = get_session(session=session)

    try:
        data = session.get(
            API_URL_WIKIDATA, params=dict(query=query, format="json")
        ).json()
    except Exception:
        # Covers request failures as well as non-JSON error responses.
        logger.error("ENTITY NOT FOUND")
        return {}

    return data
def query_entity_links(entity_id, session=None):
    """Query all linked pages from a wikidata entityID

    Args:
        entity_id (str): A wikidata entity ID
        session (requests.Session): requests session to reuse connections

    Returns:
        (json): dictionary with linked pages.
    """
    # Local import keeps this fix self-contained: log failures instead of
    # swallowing them silently (the old commented-out prints hid all errors).
    import logging

    logger = logging.getLogger(__name__)

    # SPARQL: return every (property, value) pair linked to the entity,
    # restricted to English labels and capped at 500 rows.
    query = (
        """
        PREFIX entity: <http://www.wikidata.org/entity/>
        #partial results
        SELECT ?propUrl ?propLabel ?valUrl ?valLabel
        WHERE
        {
            hint:Query hint:optimizer 'None' .
            { BIND(entity:"""
        + entity_id
        + """ AS ?valUrl) .
              BIND("N/A" AS ?propUrl ) .
              BIND("identity"@en AS ?propLabel ) .
            }
            UNION
            { entity:"""
        + entity_id
        + """ ?propUrl ?valUrl .
              ?property ?ref ?propUrl .
              ?property rdf:type wikibase:Property .
              ?property rdfs:label ?propLabel
            }
            ?valUrl rdfs:label ?valLabel
            FILTER (LANG(?valLabel) = 'en') .
            OPTIONAL{ ?valUrl wdt:P18 ?picture .}
            FILTER (lang(?propLabel) = 'en' )
        }
        ORDER BY ?propUrl ?valUrl
        LIMIT 500
        """
    )

    session = get_session(session=session)

    try:
        data = session.get(
            API_URL_WIKIDATA, params=dict(query=query, format="json")
        ).json()
    except Exception:
        # Covers request failures as well as non-JSON error responses.
        logger.error("ENTITY NOT FOUND")
        return {}

    return data
https://github.com/microsoft/recommenders/issues/919
============================= test session starts ============================== platform linux -- Python 3.6.8, pytest-5.0.1, py-1.8.0, pluggy-0.12.0 rootdir: /data/home/recocat/cicd/28/s collected 29 items / 12 deselected / 17 selected tests/integration/test_criteo.py . [ 5%] tests/integration/test_movielens.py ......... [ 58%] tests/integration/test_notebooks_python.py ......F [100%] =================================== FAILURES =================================== __________________________ test_wikidata_integration ___________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/28/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/re...aseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/28/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-1697/tmpruv_um4p' @pytest.mark.integration def test_wikidata_integration(notebooks, tmp): notebook_path = notebooks["wikidata_KG"] MOVIELENS_SAMPLE_SIZE = 5 pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE='100k', MOVIELENS_SAMPLE=True, MOVIELENS_SAMPLE_SIZE=MOVIELENS_SAMPLE_SIZE)) tests/integration/test_notebooks_python.py:173: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...nd_time': '2019-09-07T23:48:50.115802', 'duration': 14.995972, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable 
notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [19]": E --------------------------------------------------------------------------- E ImportError Traceback (most recent call last) E /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/tqdm/_tqdm.py in pandas(tclass, *targs, **tkwargs) E 612 # pandas>=0.23.0 E --> 613 from pandas.core.groupby.groupby import DataFrameGroupBy, \ E 614 SeriesGroupBy, GroupBy, PanelGroupBy E E ImportError: cannot import name 'DataFrameGroupBy' E E During handling of the above exception, another exception occurred: E E ImportError Traceback (most recent call last) E <ipython-input-19-6ccb9974139b> in <module> E ----> 1 tqdm().pandas(desc="Number of movies completed") E 2 result = pd.concat(list(movies.progress_apply(lambda x: wikidata_KG_from_movielens(x), axis=1))) E E /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/tqdm/_tqdm.py in pandas(tclass, *targs, **tkwargs) E 614 SeriesGroupBy, GroupBy, PanelGroupBy E 615 except ImportError: E --> 616 from pandas.core.groupby import DataFrameGroupBy, \ E 617 
SeriesGroupBy, GroupBy, PanelGroupBy E 618 E E ImportError: cannot import name 'PanelGroupBy'
PapermillExecutionError
def query_entity_description(entity_id, session=None):
    """Query entity wikidata description from entityID

    Args:
        entity_id (str): A wikidata page ID.
        session (requests.Session): requests session to reuse connections

    Returns:
        (str): Wikidata short description of the entityID
               descriptionNotFound' will be returned if no description is found
    """
    # SPARQL: fetch the English ``schema:description`` of the entity.
    query = (
        """
        PREFIX wd: <http://www.wikidata.org/entity/>
        PREFIX schema: <http://schema.org/>

        SELECT ?o
        WHERE
        {
          wd:"""
        + entity_id
        + """ schema:description ?o.
          FILTER ( lang(?o) = "en" )
        }
        """
    )

    session = get_session(session=session)

    try:
        r = session.get(API_URL_WIKIDATA, params=dict(query=query, format="json"))
        description = r.json()["results"]["bindings"][0]["o"]["value"]
    except Exception:
        # Covers request failures as well as an empty result set (IndexError).
        logger.error("DESCRIPTION NOT FOUND")
        return "descriptionNotFound"

    return description
def query_entity_description(entity_id, session=None):
    """Query entity wikidata description from entityID

    Args:
        entity_id (str): A wikidata page ID.
        session (requests.Session): requests session to reuse connections

    Returns:
        (str): Wikidata short description of the entityID
        'descriptionNotFound' will be returned if no description is found
    """
    query = (
        """
    PREFIX wd: <http://www.wikidata.org/entity/>
    PREFIX schema: <http://schema.org/>

    SELECT ?o
    WHERE
    {
      wd:"""
        + entity_id
        + """ schema:description ?o.
      FILTER ( lang(?o) = "en" )
    }
    """
    )
    session = get_session(session=session)
    try:
        r = session.get(API_URL_WIKIDATA, params=dict(query=query, format="json"))
        description = r.json()["results"]["bindings"][0]["o"]["value"]
    except Exception as e:
        # Resolves the original "TODO: log exception": surface the failure
        # instead of silently swallowing it. Local import keeps this change
        # self-contained; the sentinel return value is unchanged.
        import logging

        logging.getLogger(__name__).error("Description not found: %s", e)
        return "descriptionNotFound"
    return description
https://github.com/microsoft/recommenders/issues/919
============================= test session starts ============================== platform linux -- Python 3.6.8, pytest-5.0.1, py-1.8.0, pluggy-0.12.0 rootdir: /data/home/recocat/cicd/28/s collected 29 items / 12 deselected / 17 selected tests/integration/test_criteo.py . [ 5%] tests/integration/test_movielens.py ......... [ 58%] tests/integration/test_notebooks_python.py ......F [100%] =================================== FAILURES =================================== __________________________ test_wikidata_integration ___________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/28/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/re...aseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/28/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-1697/tmpruv_um4p' @pytest.mark.integration def test_wikidata_integration(notebooks, tmp): notebook_path = notebooks["wikidata_KG"] MOVIELENS_SAMPLE_SIZE = 5 pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE='100k', MOVIELENS_SAMPLE=True, MOVIELENS_SAMPLE_SIZE=MOVIELENS_SAMPLE_SIZE)) tests/integration/test_notebooks_python.py:173: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...nd_time': '2019-09-07T23:48:50.115802', 'duration': 14.995972, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable 
notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [19]": E --------------------------------------------------------------------------- E ImportError Traceback (most recent call last) E /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/tqdm/_tqdm.py in pandas(tclass, *targs, **tkwargs) E 612 # pandas>=0.23.0 E --> 613 from pandas.core.groupby.groupby import DataFrameGroupBy, \ E 614 SeriesGroupBy, GroupBy, PanelGroupBy E E ImportError: cannot import name 'DataFrameGroupBy' E E During handling of the above exception, another exception occurred: E E ImportError Traceback (most recent call last) E <ipython-input-19-6ccb9974139b> in <module> E ----> 1 tqdm().pandas(desc="Number of movies completed") E 2 result = pd.concat(list(movies.progress_apply(lambda x: wikidata_KG_from_movielens(x), axis=1))) E E /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/tqdm/_tqdm.py in pandas(tclass, *targs, **tkwargs) E 614 SeriesGroupBy, GroupBy, PanelGroupBy E 615 except ImportError: E --> 616 from pandas.core.groupby import DataFrameGroupBy, \ E 617 
SeriesGroupBy, GroupBy, PanelGroupBy E 618 E E ImportError: cannot import name 'PanelGroupBy'
PapermillExecutionError
def process_split_ratio(ratio):
    """Generate split ratio lists.

    Args:
        ratio (float or list): a float number that indicates split ratio or a list of float
            numbers that indicate split ratios (if it is a multi-split).

    Returns:
        tuple: a tuple containing
            bool: A boolean variable multi that indicates if the splitting is multi or single.
            list: A list of normalized split ratios.

    Raises:
        ValueError: if a single ratio is outside (0, 1) or any list entry is <= 0.
        TypeError: if ratio is neither a float nor a list.
    """
    if isinstance(ratio, float):
        # Single split: the ratio itself must be a proper fraction.
        if ratio <= 0 or ratio >= 1:
            raise ValueError("Split ratio has to be between 0 and 1")
        return False, ratio

    if isinstance(ratio, list):
        if any(x <= 0 for x in ratio):
            raise ValueError(
                "All split ratios in the ratio list should be larger than 0."
            )
        # Normalize when the ratios do not already sum to 1. math.fsum gives a
        # correctly-rounded total, so e.g. [0.7, 0.2, 0.1] is left untouched.
        total = math.fsum(ratio)
        if total != 1.0:
            ratio = [x / total for x in ratio]
        return True, ratio

    raise TypeError("Split ratio should be either float or a list of floats.")
def process_split_ratio(ratio):
    """Generate split ratio lists.

    Args:
        ratio (float or list): a float number that indicates split ratio or a list of float
            numbers that indicate split ratios (if it is a multi-split).

    Returns:
        tuple: a tuple containing
            bool: A boolean variable multi that indicates if the splitting is multi or single.
            list: A list of normalized split ratios.

    Raises:
        ValueError: if a single ratio is outside (0, 1) or any list entry is <= 0.
        TypeError: if ratio is neither a float nor a list.
    """
    # Local import: math.fsum returns a correctly-rounded sum, whereas plain
    # sum([0.7, 0.2, 0.1]) == 0.9999999999999999 and wrongly triggered
    # normalization of ratios that already sum to 1.
    import math

    if isinstance(ratio, float):
        if ratio <= 0 or ratio >= 1:
            raise ValueError("Split ratio has to be between 0 and 1")
        multi = False
    elif isinstance(ratio, list):
        if any(x <= 0 for x in ratio):
            raise ValueError(
                "All split ratios in the ratio list should be larger than 0."
            )
        # normalize split ratios if they are not summed to 1
        if math.fsum(ratio) != 1.0:
            ratio = [x / math.fsum(ratio) for x in ratio]
        multi = True
    else:
        raise TypeError("Split ratio should be either float or a list of floats.")
    return multi, ratio
https://github.com/microsoft/recommenders/issues/866
Traceback (most recent call last): File "C:\Users\scgraham\AppData\Local\Continuum\anaconda3\envs\reco_base\lib\site-packages\IPython\core\interactiveshell.py", line 3325, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-5-49ca4e7e6410>", line 1, in <module> x, y, z = python_random_split(df, ratio=[.7, .2, .1]) File "C:\Users\scgraham\repos\Recommenders\reco_utils\dataset\python_splitters.py", line 38, in python_random_split splits = split_pandas_data_with_ratios(data, ratio, shuffle=True, seed=seed) File "C:\Users\scgraham\repos\Recommenders\reco_utils\dataset\split_utils.py", line 155, in split_pandas_data_with_ratios raise ValueError("The ratios have to sum to 1") ValueError: The ratios have to sum to 1
ValueError
def split_pandas_data_with_ratios(data, ratios, seed=42, shuffle=False):
    """Helper function to split pandas DataFrame with given ratios

    .. note::
        Implementation referenced from `this source <https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_.

    Args:
        data (pd.DataFrame): Pandas data frame to be split.
        ratios (list of floats): list of ratios for split. The ratios have to sum to 1.
        seed (int): random seed.
        shuffle (bool): whether data will be shuffled when being split.

    Returns:
        list: List of pd.DataFrame split by the given specifications.

    Raises:
        ValueError: if the ratios do not sum to 1.
    """
    # math.fsum gives a correctly-rounded total, so e.g. [0.7, 0.2, 0.1]
    # passes the check even though plain sum() returns 0.9999999999999999.
    if math.fsum(ratios) != 1.0:
        raise ValueError("The ratios have to sum to 1")

    if shuffle:
        data = data.sample(frac=1, random_state=seed)

    # Convert the cumulative ratios into absolute row boundaries.
    cut_points = [round(x * len(data)) for x in np.cumsum(ratios).tolist()[:-1]]
    bounds = [0] + cut_points + [len(data)]

    # Slice with iloc instead of np.split: same partitions, but it does not
    # rely on numpy treating the DataFrame as an ndarray (np.split goes
    # through the deprecated DataFrame.swapaxes path on recent pandas).
    # .copy() makes the split_index assignment below warning-free.
    splits = [
        data.iloc[bounds[i] : bounds[i + 1]].copy() for i in range(len(ratios))
    ]

    # Add split index (this makes splitting by group more efficient).
    for i, split in enumerate(splits):
        split["split_index"] = i

    return splits
def split_pandas_data_with_ratios(data, ratios, seed=42, shuffle=False):
    """Helper function to split pandas DataFrame with given ratios

    .. note::
        Implementation referenced from `this source <https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_.

    Args:
        data (pd.DataFrame): Pandas data frame to be split.
        ratios (list of floats): list of ratios for split. The ratios have to sum to 1.
        seed (int): random seed.
        shuffle (bool): whether data will be shuffled when being split.

    Returns:
        list: List of pd.DataFrame split by the given specifications.

    Raises:
        ValueError: if the ratios do not sum to 1.
    """
    # Local import: math.fsum returns a correctly-rounded sum, whereas plain
    # sum([0.7, 0.2, 0.1]) == 0.9999999999999999 and made valid ratios fail
    # the check below.
    import math

    if math.fsum(ratios) != 1.0:
        raise ValueError("The ratios have to sum to 1")

    if shuffle:
        data = data.sample(frac=1, random_state=seed)

    # Convert the cumulative ratios into absolute row boundaries.
    cut_points = [round(x * len(data)) for x in np.cumsum(ratios).tolist()[:-1]]
    bounds = [0] + cut_points + [len(data)]

    # Slice with iloc instead of np.split: same partitions, but it does not
    # rely on numpy treating the DataFrame as an ndarray (np.split goes
    # through the deprecated DataFrame.swapaxes path on recent pandas).
    splits = [
        data.iloc[bounds[i] : bounds[i + 1]].copy() for i in range(len(ratios))
    ]

    # Add split index (this makes splitting by group more efficient).
    for i, split in enumerate(splits):
        split["split_index"] = i

    return splits
https://github.com/microsoft/recommenders/issues/866
Traceback (most recent call last): File "C:\Users\scgraham\AppData\Local\Continuum\anaconda3\envs\reco_base\lib\site-packages\IPython\core\interactiveshell.py", line 3325, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-5-49ca4e7e6410>", line 1, in <module> x, y, z = python_random_split(df, ratio=[.7, .2, .1]) File "C:\Users\scgraham\repos\Recommenders\reco_utils\dataset\python_splitters.py", line 38, in python_random_split splits = split_pandas_data_with_ratios(data, ratio, shuffle=True, seed=seed) File "C:\Users\scgraham\repos\Recommenders\reco_utils\dataset\split_utils.py", line 155, in split_pandas_data_with_ratios raise ValueError("The ratios have to sum to 1") ValueError: The ratios have to sum to 1
ValueError
def get_experiment_status(status_url=NNI_STATUS_URL):
    """Helper method. Gets the experiment status from the REST endpoint

    Args:
        status_url (str): URL for the REST endpoint

    Returns:
        dict: status of the experiment
    """
    # Return the full JSON payload so callers can inspect both the status
    # string and any reported errors.
    response = requests.get(status_url)
    return response.json()
def get_experiment_status(status_url):
    """Helper method. Gets the experiment status from the REST endpoint

    Args:
        status_url (str): URL for the REST endpoint

    Returns:
        str: status of the experiment
    """
    # Only the "status" string of the JSON payload is exposed to callers.
    return requests.get(status_url).json()["status"]
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES):
    """Checks the status of the current experiment on the NNI REST endpoint

    Waits until the tuning has completed

    Args:
        wait (numeric) : time to wait in seconds
        max_retries (int): max number of retries

    Raises:
        RuntimeError: if the experiment ends in a failure state.
        TimeoutError: if the experiment is still running after all retries.
    """
    for _ in range(max_retries):
        nni_status = get_experiment_status(NNI_STATUS_URL)
        if nni_status["status"] in ["DONE", "TUNER_NO_MORE_TRIAL"]:
            # Tuning finished successfully.
            break
        if nni_status["status"] not in ["RUNNING", "NO_MORE_TRIAL"]:
            raise RuntimeError(
                "NNI experiment failed to complete with status {} - {}".format(
                    nni_status["status"], nni_status["errors"][0]
                )
            )
        time.sleep(wait)
    else:
        # Loop exhausted without reaching a terminal state.
        raise TimeoutError("check_experiment_status() timed out")
def check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES):
    """Checks the status of the current experiment on the NNI REST endpoint

    Waits until the tuning has completed

    Args:
        wait (numeric) : time to wait in seconds
        max_retries (int): max number of retries

    Raises:
        RuntimeError: if the experiment ends in a failure state.
        TimeoutError: if the experiment is still running after all retries.
    """
    for _ in range(max_retries):
        status = get_experiment_status(NNI_STATUS_URL)
        if status in ["DONE", "TUNER_NO_MORE_TRIAL"]:
            # Tuning finished successfully.
            break
        if status not in ["RUNNING", "NO_MORE_TRIAL"]:
            raise RuntimeError(
                "NNI experiment failed to complete with status {}".format(status)
            )
        time.sleep(wait)
    else:
        # Loop exhausted without reaching a terminal state.
        raise TimeoutError("check_experiment_status() timed out")
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def check_stopped(wait=WAITING_TIME, max_retries=MAX_RETRIES):
    """Checks that there is no NNI experiment active (the URL is not accessible)

    This method should be called after "nnictl stop" for verification

    Args:
        wait (numeric) : time to wait in seconds
        max_retries (int): max number of retries

    Raises:
        TimeoutError: if the endpoint is still reachable after all retries.
    """
    for _ in range(max_retries):
        try:
            get_experiment_status(NNI_STATUS_URL)
        except Exception:
            # The endpoint is unreachable, which is exactly what "stopped"
            # means here. Catch Exception rather than a bare `except:` so
            # KeyboardInterrupt/SystemExit are not swallowed.
            break
        time.sleep(wait)
    else:
        raise TimeoutError("check_stopped() timed out")
def check_stopped(wait=WAITING_TIME, max_retries=MAX_RETRIES):
    """Checks that there is no NNI experiment active (the URL is not accessible)

    This method should be called after 'nnictl stop' for verification

    Args:
        wait (numeric) : time to wait in seconds
        max_retries (int): max number of retries

    Raises:
        TimeoutError: if the endpoint is still reachable after all retries.
    """
    for _ in range(max_retries):
        try:
            get_experiment_status(NNI_STATUS_URL)
        except Exception:
            # The endpoint is unreachable, which is exactly what "stopped"
            # means here. Catch Exception rather than a bare `except:` so
            # KeyboardInterrupt/SystemExit are not swallowed.
            break
        time.sleep(wait)
    else:
        raise TimeoutError("check_stopped() timed out")
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def check_metrics_written(wait=WAITING_TIME, max_retries=MAX_RETRIES):
    """Waits until the metrics have been written to the trial logs

    Args:
        wait (numeric) : time to wait in seconds
        max_retries (int): max number of retries

    Raises:
        TimeoutError: if some trial has not reported metrics after all retries.
    """
    for _ in range(max_retries):
        trials = requests.get(NNI_TRIAL_JOBS_URL).json()
        # Every trial must have reported its final metric before we proceed.
        if all("finalMetricData" in trial for trial in trials):
            break
        time.sleep(wait)
    else:
        raise TimeoutError("check_metrics_written() timed out")
def check_metrics_written(wait=WAITING_TIME, max_retries=MAX_RETRIES):
    """Waits until the metrics have been written to the trial logs

    Args:
        wait (numeric) : time to wait in seconds
        max_retries (int): max number of retries

    Raises:
        TimeoutError: if some trial has not reported metrics after all retries.
    """
    for _ in range(max_retries):
        trials = requests.get(NNI_TRIAL_JOBS_URL).json()
        # Every trial must have reported its final metric before we proceed.
        if all("finalMetricData" in trial for trial in trials):
            break
        time.sleep(wait)
    else:
        raise TimeoutError("check_metrics_written() timed out")
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def get_trials(optimize_mode):
    """Obtain information about the trials of the current experiment via the REST endpoint

    Args:
        optimize_mode (str): One of "minimize", "maximize". Determines how to
            obtain the best default metric.

    Returns:
        list: Trials info, list of (metrics, log path)
        dict: Metrics for the best choice of hyperparameters
        dict: Best hyperparameters
        str: Log path for the best trial
    """
    if optimize_mode not in ["minimize", "maximize"]:
        raise ValueError("optimize_mode should equal either minimize or maximize")

    all_trials = requests.get(NNI_TRIAL_JOBS_URL).json()
    trials = []
    for trial in all_trials:
        # NOTE(review): eval() on data returned by a REST endpoint is unsafe
        # in general; the payload comes from the local NNI service, but
        # ast.literal_eval would be safer — confirm the metric string is
        # always a plain literal before switching.
        trial_metrics = eval(trial["finalMetricData"][0]["data"])
        trial_log_path = trial["logPath"].split(":")[-1]
        trials.append((trial_metrics, trial_log_path))

    # Best trial first: descending default metric for "maximize", ascending
    # for "minimize".
    ordered = sorted(
        trials, key=lambda t: t[0]["default"], reverse=(optimize_mode == "maximize")
    )
    best_trial_path = ordered[0][1]

    # Read the metrics from the trial directory in order to get the name of
    # the default metric.
    with open(os.path.join(best_trial_path, "metrics.json"), "r") as fp:
        best_metrics = json.load(fp)
    with open(os.path.join(best_trial_path, "parameter.cfg"), "r") as fp:
        best_params = json.load(fp)

    return trials, best_metrics, best_params, best_trial_path
def get_trials(optimize_mode):
    """Obtain information about the trials of the current experiment via the REST endpoint

    Args:
        optimize_mode (str): One of 'minimize', 'maximize'. Determines how to
            obtain the best default metric.

    Returns:
        list: Trials info, list of (metrics, log path)
        dict: Metrics for the best choice of hyperparameters
        dict: Best hyperparameters
        str: Log path for the best trial
    """
    if optimize_mode not in ["minimize", "maximize"]:
        raise ValueError("optimize_mode should equal either 'minimize' or 'maximize'")

    all_trials = requests.get(NNI_TRIAL_JOBS_URL).json()
    trials = []
    for trial in all_trials:
        # NOTE(review): eval() on data returned by a REST endpoint is unsafe
        # in general; the payload comes from the local NNI service, but
        # ast.literal_eval would be safer — confirm the metric string is
        # always a plain literal before switching.
        trial_metrics = eval(trial["finalMetricData"][0]["data"])
        trial_log_path = trial["logPath"].split(":")[-1]
        trials.append((trial_metrics, trial_log_path))

    # Best trial first: descending default metric for "maximize", ascending
    # for "minimize".
    ordered = sorted(
        trials, key=lambda t: t[0]["default"], reverse=(optimize_mode == "maximize")
    )
    best_trial_path = ordered[0][1]

    # Read the metrics from the trial directory in order to get the name of
    # the default metric.
    with open(os.path.join(best_trial_path, "metrics.json"), "r") as fp:
        best_metrics = json.load(fp)
    with open(os.path.join(best_trial_path, "parameter.cfg"), "r") as fp:
        best_params = json.load(fp)

    return trials, best_metrics, best_params, best_trial_path
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def svd_training(params):
    """Train a Surprise SVD model with the given hyper-parameters and report metrics.

    Loads pickled train/validation DataFrames from the datastore, fits an SVD
    model, evaluates the configured rating and/or ranking metrics, reports them
    to NNI, and writes them to ``metrics.json`` in the NNI output directory.

    Args:
        params (dict): hyper-parameters and run configuration (see get_params).

    Returns:
        The fitted surprise.SVD model.

    Raises:
        ValueError: if neither rating nor ranking metrics were specified.
        RuntimeError: if the NNI_OUTPUT_DIR environment variable is not set.
    """
    rating_metrics = params["rating_metrics"]
    ranking_metrics = params["ranking_metrics"]
    # Fail fast before spending time on training when there is nothing to evaluate
    # (the original raised this only after the model had already been fitted).
    if not rating_metrics and not ranking_metrics:
        raise ValueError("No metrics were specified.")

    logger.debug("Start training...")
    # Train/validation sets are pickled pandas DataFrames in the datastore.
    train_data = pd.read_pickle(
        path=os.path.join(params["datastore"], params["train_datapath"])
    )
    validation_data = pd.read_pickle(
        path=os.path.join(params["datastore"], params["validation_datapath"])
    )

    # Subset of params forwarded verbatim to surprise.SVD.
    svd_params = {
        p: params[p]
        for p in [
            "random_state",
            "n_epochs",
            "verbose",
            "biased",
            "n_factors",
            "init_mean",
            "init_std_dev",
            "lr_all",
            "reg_all",
            "lr_bu",
            "lr_bi",
            "lr_pu",
            "lr_qi",
            "reg_bu",
            "reg_bi",
            "reg_pu",
            "reg_qi",
        ]
    }
    svd = surprise.SVD(**svd_params)
    train_set = surprise.Dataset.load_from_df(
        train_data, reader=surprise.Reader(params["surprise_reader"])
    ).build_full_trainset()
    svd.fit(train_set)

    logger.debug("Evaluating...")
    metrics_dict = {}

    def _record(metric, result):
        # NNI treats the "default" key as the primary optimization target, so the
        # primary metric is stored under that name instead of its own.
        key = "default" if metric == params["primary_metric"] else metric
        metrics_dict[key] = result

    if rating_metrics:
        predictions = compute_rating_predictions(
            svd, validation_data, usercol=params["usercol"], itemcol=params["itemcol"]
        )
        for metric in rating_metrics:
            result = getattr(evaluation, metric)(validation_data, predictions)
            logger.debug("%s = %g", metric, result)
            _record(metric, result)

    if ranking_metrics:
        all_predictions = compute_ranking_predictions(
            svd,
            train_data,
            usercol=params["usercol"],
            itemcol=params["itemcol"],
            remove_seen=params["remove_seen"],
        )
        k = params["k"]
        for metric in ranking_metrics:
            result = getattr(evaluation, metric)(
                validation_data, all_predictions, col_prediction="prediction", k=k
            )
            logger.debug("%s@%d = %g", metric, k, result)
            _record(metric, result)

    # Report the metrics to NNI.
    nni.report_final_result(metrics_dict)

    # Save the metrics in a JSON file under the NNI output directory.
    output_dir = os.environ.get("NNI_OUTPUT_DIR")
    if output_dir is None:
        # Without this guard os.path.join(None, ...) raises an opaque TypeError.
        raise RuntimeError("NNI_OUTPUT_DIR is not set; run this script under NNI.")
    with open(os.path.join(output_dir, "metrics.json"), "w") as fp:
        saved = metrics_dict.copy()
        # Restore the primary metric under its real name for the JSON file.
        saved[params["primary_metric"]] = saved.pop("default")
        json.dump(saved, fp)
    return svd
def svd_training(params):
    """Train a Surprise SVD model with the given hyper-parameters and report metrics.

    Loads pickled train/validation DataFrames from the datastore, fits an SVD
    model, evaluates the configured rating and/or ranking metrics, reports them
    to NNI, and writes them to ``metrics.json`` in the NNI output directory.

    Args:
        params (dict): hyper-parameters and run configuration (see get_params).

    Returns:
        The fitted surprise.SVD model.

    Raises:
        ValueError: if neither rating nor ranking metrics were specified.
    """
    logger.debug("Start training...")
    # Train/validation sets are pickled pandas DataFrames in the datastore.
    train_data = pd.read_pickle(
        path=os.path.join(params["datastore"], params["train_datapath"])
    )
    validation_data = pd.read_pickle(
        path=os.path.join(params["datastore"], params["validation_datapath"])
    )
    # Subset of params forwarded verbatim to surprise.SVD.
    svd_params = {
        p: params[p]
        for p in [
            "random_state",
            "n_epochs",
            "verbose",
            "biased",
            "n_factors",
            "init_mean",
            "init_std_dev",
            "lr_all",
            "reg_all",
            "lr_bu",
            "lr_bi",
            "lr_pu",
            "lr_qi",
            "reg_bu",
            "reg_bi",
            "reg_pu",
            "reg_qi",
        ]
    }
    svd = surprise.SVD(**svd_params)
    train_set = surprise.Dataset.load_from_df(
        train_data, reader=surprise.Reader(params["surprise_reader"])
    ).build_full_trainset()
    svd.fit(train_set)

    logger.debug("Evaluating...")

    metrics_dict = {}
    rating_metrics = params["rating_metrics"]
    if len(rating_metrics) > 0:
        predictions = compute_rating_predictions(
            svd, validation_data, usercol=params["usercol"], itemcol=params["itemcol"]
        )
        for metric in rating_metrics:
            result = getattr(evaluation, metric)(validation_data, predictions)
            logger.debug("%s = %g", metric, result)
            # NNI treats the "default" key as the primary optimization target.
            if metric == params["primary_metric"]:
                metrics_dict["default"] = result
            else:
                metrics_dict[metric] = result

    ranking_metrics = params["ranking_metrics"]
    if len(ranking_metrics) > 0:
        # NOTE(review): confirm compute_ranking_predictions still accepts a
        # "recommend_seen" keyword — later revisions renamed this parameter.
        all_predictions = compute_ranking_predictions(
            svd,
            train_data,
            usercol=params["usercol"],
            itemcol=params["itemcol"],
            recommend_seen=params["recommend_seen"],
        )
        k = params["k"]
        for metric in ranking_metrics:
            result = getattr(evaluation, metric)(
                validation_data, all_predictions, col_prediction="prediction", k=k
            )
            logger.debug("%s@%d = %g", metric, k, result)
            if metric == params["primary_metric"]:
                metrics_dict["default"] = result
            else:
                metrics_dict[metric] = result

    # Raised only after training — nothing was evaluated above in this case.
    if len(ranking_metrics) == 0 and len(rating_metrics) == 0:
        raise ValueError("No metrics were specified.")

    # Report the metrics to NNI.
    nni.report_final_result(metrics_dict)

    # Save the metrics in a JSON file.
    # NOTE(review): NNI_OUTPUT_DIR may be unset outside an NNI run, in which case
    # os.path.join(None, ...) raises TypeError — confirm this only runs under NNI.
    output_dir = os.environ.get("NNI_OUTPUT_DIR")
    with open(os.path.join(output_dir, "metrics.json"), "w") as fp:
        temp_dict = metrics_dict.copy()
        # Restore the primary metric under its real name for the JSON file.
        temp_dict[params["primary_metric"]] = temp_dict.pop("default")
        json.dump(temp_dict, fp)
    return svd
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def get_params():
    """Build and parse the command-line arguments for the SVD training script.

    Returns:
        argparse.Namespace: parsed arguments covering data paths, metric
        selection, fixed training settings, and tunable hyperparameters.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # Data paths
    add("--datastore", type=str, dest="datastore", help="Datastore path")
    add("--train-datapath", type=str, dest="train_datapath")
    add("--validation-datapath", type=str, dest="validation_datapath")
    add("--surprise-reader", type=str, dest="surprise_reader")
    add("--usercol", type=str, dest="usercol", default="userID")
    add("--itemcol", type=str, dest="itemcol", default="itemID")

    # Metric selection
    add("--rating-metrics", type=str, nargs="*", dest="rating_metrics", default=[])
    add("--ranking-metrics", type=str, nargs="*", dest="ranking_metrics", default=[])
    add("--k", type=int, dest="k", default=None)
    # store_false: remove_seen defaults to True and the flag disables it.
    add("--remove-seen", dest="remove_seen", action="store_false")

    # Fixed training parameters
    add("--random-state", type=int, dest="random_state", default=0)
    add("--verbose", dest="verbose", action="store_true")
    add("--epochs", type=int, dest="n_epochs", default=30)
    add("--biased", dest="biased", action="store_true")
    add("--primary-metric", dest="primary_metric", default="rmse")

    # Hyperparameters to be tuned: n_factors is an int, the rest are floats.
    add("--n_factors", type=int, dest="n_factors", default=100)
    for name, default in [
        ("init_mean", 0.0),
        ("init_std_dev", 0.1),
        ("lr_all", 0.005),
        ("reg_all", 0.02),
        ("lr_bu", None),
        ("lr_bi", None),
        ("lr_pu", None),
        ("lr_qi", None),
        ("reg_bu", None),
        ("reg_bi", None),
        ("reg_pu", None),
        ("reg_qi", None),
    ]:
        add("--" + name, type=float, dest=name, default=default)

    return parser.parse_args()
def get_params():
    """Build and parse the command-line arguments for the SVD training script.

    Returns:
        argparse.Namespace: parsed arguments covering data paths, metric
        selection, fixed training settings, and tunable hyperparameters.
    """
    p = argparse.ArgumentParser()

    # Data paths
    p.add_argument("--datastore", type=str, dest="datastore", help="Datastore path")
    p.add_argument("--train-datapath", type=str, dest="train_datapath")
    p.add_argument("--validation-datapath", type=str, dest="validation_datapath")
    p.add_argument("--surprise-reader", type=str, dest="surprise_reader")
    p.add_argument("--usercol", type=str, dest="usercol", default="userID")
    p.add_argument("--itemcol", type=str, dest="itemcol", default="itemID")

    # Metric selection
    p.add_argument("--rating-metrics", type=str, nargs="*", dest="rating_metrics", default=[])
    p.add_argument("--ranking-metrics", type=str, nargs="*", dest="ranking_metrics", default=[])
    p.add_argument("--k", type=int, dest="k", default=None)
    # store_true: recommend_seen defaults to False and the flag enables it.
    p.add_argument("--recommend-seen", dest="recommend_seen", action="store_true")

    # Fixed training parameters
    p.add_argument("--random-state", type=int, dest="random_state", default=0)
    p.add_argument("--verbose", dest="verbose", action="store_true")
    p.add_argument("--epochs", type=int, dest="n_epochs", default=30)
    p.add_argument("--biased", dest="biased", action="store_true")
    p.add_argument("--primary-metric", dest="primary_metric", default="rmse")

    # Hyperparameters to be tuned: n_factors is an int, the rest are floats.
    p.add_argument("--n_factors", type=int, dest="n_factors", default=100)
    float_hparams = [
        ("init_mean", 0.0),
        ("init_std_dev", 0.1),
        ("lr_all", 0.005),
        ("reg_all", 0.02),
        ("lr_bu", None),
        ("lr_bi", None),
        ("lr_pu", None),
        ("lr_qi", None),
        ("reg_bu", None),
        ("reg_bi", None),
        ("reg_pu", None),
        ("reg_qi", None),
    ]
    for name, default in float_hparams:
        p.add_argument("--" + name, type=float, dest=name, default=default)

    return p.parse_args()
https://github.com/microsoft/recommenders/issues/746
tests/integration/test_movielens.py ......... [ 62%] tests/integration/test_notebooks_python.py .....F [100%] =================================== FAILURES =================================== _____________________________ test_nni_tuning_svd ______________________________ notebooks = {'als_deep_dive': '/data/home/recocat/cicd/7/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/rec...baseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/7/s/notebooks/01_prepare_data/data_split.ipynb', ...} tmp = '/tmp/pytest-of-recocat/pytest-200/tmpuj9_hwzq' @pytest.mark.integration def test_nni_tuning_svd(notebooks, tmp): notebook_path = notebooks["nni_tuning_svd"] # First stop NNI in case it is running subprocess.run([sys.prefix + '/bin/nnictl', 'stop']) check_stopped() pm.execute_notebook(notebook_path, OUTPUT_NOTEBOOK, kernel_name=KERNEL_NAME, parameters=dict(MOVIELENS_DATA_SIZE="100k", SURPRISE_READER="ml-100k", TMP_DIR=tmp, MAX_TRIAL_NUM=1, NUM_EPOCHS=1)) tests/integration/test_notebooks_python.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:94: in execute_notebook raise_for_execution_errors(nb, output_path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-04-18T12:09:01.272263', 'duration': 345.187067, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} output_path = 'output.ipynb' def raise_for_execution_errors(nb, output_path): """Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook """ error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == 
"error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: # Write notebook back out with the Error Message at the top of the Notebook. error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error E papermill.exceptions.PapermillExecutionError: E --------------------------------------------------------------------------- E Exception encountered at "In [32]": E --------------------------------------------------------------------------- E RuntimeError Traceback (most recent call last) E <ipython-input-32-120fb8357fc9> in <module> E 1 t = time.time() E ----> 2 stop_and_restart() E 3 time_smac = time.time() - t E E <ipython-input-20-609ee7726ae6> in stop_and_restart() E 6 proc = subprocess.run([sys.prefix + '/bin/nnictl', 'create', '--config', config_path]) E 7 if proc.returncode != 0: E ----> 8 raise RuntimeError("'nnictl create' failed with code %d" % proc.returncode) E 9 check_experiment_status(wait=WAITING_TIME, max_retries=MAX_RETRIES) E E RuntimeError: 'nnictl create' failed with code 1 /anaconda/envs/nightly_reco_base/lib/python3.6/site-packages/papermill/execute.py:241: PapermillExecutionError
PapermillExecutionError
def __init__(self, hparams, iterator_creator, graph=None):
    """Build the common TF graph pieces shared by all deeprec models.

    Constructs the computation graph (logits, predictions, loss, optimizer,
    saver, summaries), then opens a session with on-demand GPU memory growth
    and initializes all variables.

    :param hparams: a tf.contrib.training.HParams object, hold the entire set of hyperparameters.
    :param iterator_creator: callable invoked as iterator_creator(hparams, graph)
        to build the data iterator.
    :param graph: optional existing tf.Graph to build into; a new graph is
        created when omitted (or falsy).
    """
    # Reuse the caller's graph when provided, otherwise create a fresh one.
    if not graph:
        self.graph = tf.Graph()
    else:
        self.graph = graph
    self.iterator = iterator_creator(hparams, self.graph)

    with self.graph.as_default():
        self.hparams = hparams
        # Collections populated elsewhere by subclasses — presumably for
        # regularization terms; confirm against the concrete models.
        self.layer_params = []
        self.embed_params = []
        self.cross_params = []
        # Placeholders fed at session-run time: per-layer keep probabilities
        # and a train/eval mode switch.
        self.layer_keeps = tf.placeholder(tf.float32, name="layer_keeps")
        self.keep_prob_train = None
        self.keep_prob_test = None
        self.is_train_stage = tf.placeholder(tf.bool, shape=(), name="is_training")
        self.initializer = self._get_initializer()
        # Subclass hook: _build_graph() produces the model logits.
        self.logit = self._build_graph()
        self.pred = self._get_pred(self.logit, self.hparams.method)
        self.loss = self._get_loss()
        # Retain up to hparams.epochs checkpoints.
        self.saver = tf.train.Saver(max_to_keep=self.hparams.epochs)
        self.update = self._build_train_opt()
        self.init_op = tf.global_variables_initializer()
        self.merged = self._add_summaries()

    # Grow GPU memory on demand instead of pre-allocating the whole device.
    gpu_options = tf.GPUOptions(allow_growth=True)
    self.sess = tf.Session(
        graph=self.graph, config=tf.ConfigProto(gpu_options=gpu_options)
    )
    self.sess.run(self.init_op)
def __init__(self, hparams, iterator_creator, graph=None):
    """Build the common TF graph pieces shared by all deeprec models.

    Constructs the computation graph (logits, predictions, loss, optimizer,
    saver, summaries), then opens a session and initializes all variables.

    :param hparams: a tf.contrib.training.HParams object, hold the entire set of hyperparameters.
    :param iterator_creator: callable invoked as iterator_creator(hparams, graph)
        to build the data iterator.
    :param graph: optional existing tf.Graph to build into; a new graph is
        created when omitted (or falsy).
    """
    # Reuse the caller's graph when provided, otherwise create a fresh one.
    if not graph:
        self.graph = tf.Graph()
    else:
        self.graph = graph
    self.iterator = iterator_creator(hparams, self.graph)

    with self.graph.as_default():
        self.hparams = hparams
        # Collections populated elsewhere by subclasses — presumably for
        # regularization terms; confirm against the concrete models.
        self.layer_params = []
        self.embed_params = []
        self.cross_params = []
        # Placeholders fed at session-run time: per-layer keep probabilities
        # and a train/eval mode switch.
        self.layer_keeps = tf.placeholder(tf.float32, name="layer_keeps")
        self.keep_prob_train = None
        self.keep_prob_test = None
        self.is_train_stage = tf.placeholder(tf.bool, shape=(), name="is_training")
        self.initializer = self._get_initializer()
        # Subclass hook: _build_graph() produces the model logits.
        self.logit = self._build_graph()
        self.pred = self._get_pred(self.logit, self.hparams.method)
        self.loss = self._get_loss()
        # Retain up to hparams.epochs checkpoints.
        self.saver = tf.train.Saver(max_to_keep=self.hparams.epochs)
        self.update = self._build_train_opt()
        self.init_op = tf.global_variables_initializer()
        self.merged = self._add_summaries()

    # Fix: a Session with the default config pre-allocates (nearly) all GPU
    # memory, which triggers ResourceExhaustedError/OOM when the device is
    # shared. allow_growth makes TF claim GPU memory on demand instead.
    gpu_options = tf.GPUOptions(allow_growth=True)
    self.sess = tf.Session(
        graph=self.graph, config=tf.ConfigProto(gpu_options=gpu_options)
    )
    self.sess.run(self.init_op)
https://github.com/microsoft/recommenders/issues/478
2019-01-31T18:59:24.6946026Z tests/smoke/test_deeprec_model.py ..FF [ 57%] 2019-01-31T19:00:09.5820104Z tests/smoke/test_notebooks_gpu.py ..F [100%] 2019-01-31T19:00:09.5820400Z 2019-01-31T19:00:09.5823428Z =================================== FAILURES =================================== 2019-01-31T19:00:09.5824122Z ____________________________ test_notebook_xdeepfm _____________________________ 2019-01-31T19:00:09.5824306Z 2019-01-31T19:00:09.5825291Z notebooks = {'als_deep_dive': '/data/home/recocat/cicd/18/s/notebooks/02_model/als_deep_dive.ipynb', 'als_pyspark': '/data/home/re...aseline_deep_dive.ipynb', 'data_split': '/data/home/recocat/cicd/18/s/notebooks/01_prepare_data/data_split.ipynb', ...} 2019-01-31T19:00:09.5825418Z 2019-01-31T19:00:09.5825517Z @pytest.mark.smoke 2019-01-31T19:00:09.5825559Z @pytest.mark.gpu 2019-01-31T19:00:09.5825598Z @pytest.mark.deeprec 2019-01-31T19:00:09.5825994Z def test_notebook_xdeepfm(notebooks): 2019-01-31T19:00:09.5826074Z notebook_path = notebooks["xdeepfm_quickstart"] 2019-01-31T19:00:09.5828021Z pm.execute_notebook( 2019-01-31T19:00:09.5828205Z notebook_path, 2019-01-31T19:00:09.5828310Z OUTPUT_NOTEBOOK, 2019-01-31T19:00:09.5828356Z kernel_name=KERNEL_NAME, 2019-01-31T19:00:09.5828405Z > parameters=dict(epochs_for_synthetic_run=20, epochs_for_criteo_run=1), 2019-01-31T19:00:09.5828507Z ) 2019-01-31T19:00:09.5828539Z 2019-01-31T19:00:09.5828581Z tests/smoke/test_deeprec_model.py:72: 2019-01-31T19:00:09.5828670Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2019-01-31T19:00:09.5829162Z /anaconda/envs/nightly_gpu/lib/python3.6/site-packages/papermill/execute.py:78: in execute_notebook 2019-01-31T19:00:09.5829243Z raise_for_execution_errors(nb, output_path) 2019-01-31T19:00:09.5829344Z _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2019-01-31T19:00:09.5829380Z 2019-01-31T19:00:09.5829784Z nb = {'cells': [{'cell_type': 'code', 'metadata': {'inputHidden': True, 
'hide_input': True}, 'execution_count': None, 'sour...d_time': '2019-01-31T18:55:44.346403', 'duration': 139.097582, 'exception': True}}, 'nbformat': 4, 'nbformat_minor': 2} 2019-01-31T19:00:09.5830271Z output_path = 'output.ipynb' 2019-01-31T19:00:09.5830308Z 2019-01-31T19:00:09.5830392Z def raise_for_execution_errors(nb, output_path): 2019-01-31T19:00:09.5830438Z error = None 2019-01-31T19:00:09.5830638Z for cell in nb.cells: 2019-01-31T19:00:09.5832196Z if cell.get("outputs") is None: 2019-01-31T19:00:09.5832399Z continue 2019-01-31T19:00:09.5832445Z 2019-01-31T19:00:09.5832809Z for output in cell.outputs: 2019-01-31T19:00:09.5833664Z if output.output_type == "error": 2019-01-31T19:00:09.5833911Z error = PapermillExecutionError( 2019-01-31T19:00:09.5835901Z exec_count=cell.execution_count, 2019-01-31T19:00:09.5835980Z source=cell.source, 2019-01-31T19:00:09.5836098Z ename=output.ename, 2019-01-31T19:00:09.5836143Z evalue=output.evalue, 2019-01-31T19:00:09.5836189Z traceback=output.traceback, 2019-01-31T19:00:09.5836276Z ) 2019-01-31T19:00:09.5836318Z break 2019-01-31T19:00:09.5836357Z 2019-01-31T19:00:09.5836435Z if error: 2019-01-31T19:00:09.5836483Z # Write notebook back out with the Error Message at the top of the Notebook. 
2019-01-31T19:00:09.5842777Z error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) 2019-01-31T19:00:09.5843714Z error_msg_cell = nbformat.v4.new_code_cell( 2019-01-31T19:00:09.5843767Z source="%%html\n" + error_msg, 2019-01-31T19:00:09.5843859Z outputs=[ 2019-01-31T19:00:09.5843909Z nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) 2019-01-31T19:00:09.5843958Z ], 2019-01-31T19:00:09.5844056Z metadata={"inputHidden": True, "hide_input": True}, 2019-01-31T19:00:09.5844103Z ) 2019-01-31T19:00:09.5844146Z nb.cells = [error_msg_cell] + nb.cells 2019-01-31T19:00:09.5844232Z write_ipynb(nb, output_path) 2019-01-31T19:00:09.5844276Z > raise error 2019-01-31T19:00:09.5844320Z E papermill.exceptions.PapermillExecutionError: 2019-01-31T19:00:09.5844751Z E --------------------------------------------------------------------------- 2019-01-31T19:00:09.5844808Z E Exception encountered at "In [12]": 2019-01-31T19:00:09.5845084Z E --------------------------------------------------------------------------- 2019-01-31T19:00:09.5845141Z E ResourceExhaustedError Traceback (most recent call last) 2019-01-31T19:00:09.5845399Z E /anaconda/envs/nightly_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 2019-01-31T19:00:09.5845502Z E 1333 try: 2019-01-31T19:00:09.5845686Z E -> 1334 return fn(*args) 2019-01-31T19:00:09.5845736Z E 1335 except errors.OpError as e: 2019-01-31T19:00:09.5845824Z E 2019-01-31T19:00:09.5846113Z E /anaconda/envs/nightly_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata) 2019-01-31T19:00:09.5846215Z E 1318 return self._call_tf_sessionrun( 2019-01-31T19:00:09.5846433Z E -> 1319 options, feed_dict, fetch_list, target_list, run_metadata) 2019-01-31T19:00:09.5846492Z E 1320 2019-01-31T19:00:09.5846573Z E 2019-01-31T19:00:09.5846872Z E 
/anaconda/envs/nightly_gpu/lib/python3.6/site-packages/tensorflow/python/client/session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata) 2019-01-31T19:00:09.5846939Z E 1406 self._session, options, feed_dict, fetch_list, target_list, 2019-01-31T19:00:09.5847322Z E -> 1407 run_metadata) 2019-01-31T19:00:09.5847371Z E 1408 2019-01-31T19:00:09.5847413Z E 2019-01-31T19:00:09.5847545Z E ResourceExhaustedError: OOM when allocating tensor of shape [2300000,10] and type float 2019-01-31T19:00:09.5847621Z E [[{{node XDeepFM/embedding/embedding_layer/Adam/Initializer/zeros}} = Const[dtype=DT_FLOAT, value=Tensor<type: float shape: [2300000,10] values: [0 0 0...]...>, _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]] 2019-01-31T19:00:09.5847730Z E
PapermillExecutionError
def install_prerequisites():
    """Install the system and Python packages required before cloning the bench repo."""
    # OS-level build/dev dependencies, keyed by the available package manager.
    run_os_command(
        {
            "apt-get": [
                "sudo apt-get update",
                "sudo apt-get install -y git build-essential python3-setuptools python3-dev libffi-dev",
            ],
            "yum": [
                'sudo yum groupinstall -y "Development tools"',
                "sudo yum install -y epel-release redhat-lsb-core git python-setuptools python-devel openssl-devel libffi-devel",
            ],
        }
    )

    # Required command-line tools.
    for tool in ("curl", "wget", "git"):
        install_package(tool)
    install_package("pip3", "python3-pip")

    # Upgrade Python packaging tooling and install Ansible for the playbooks.
    success = run_os_command(
        {
            "python3": "sudo -H python3 -m pip install --upgrade setuptools wheel cryptography ansible==2.8.5 pip"
        }
    )

    # Accept a pre-existing ansible binary even if the pip install failed.
    if not (success or shutil.which("ansible")):
        could_not_install("Ansible")
def install_prerequisites():
    """Install system packages and Python tooling needed before cloning the bench repo.

    Installs OS-level build dependencies via the detected package manager,
    required command-line tools, and Ansible (via pip).
    """
    # pre-requisites for bench repo cloning
    run_os_command(
        {
            "apt-get": [
                "sudo apt-get update",
                "sudo apt-get install -y git build-essential python3-setuptools python3-dev libffi-dev",
            ],
            "yum": [
                'sudo yum groupinstall -y "Development tools"',
                "sudo yum install -y epel-release redhat-lsb-core git python-setuptools python-devel openssl-devel libffi-devel",
            ],
        }
    )
    install_package("curl")
    install_package("wget")
    install_package("git")
    install_package("pip3", "python3-pip")
    # Fix: "wheel" must be installed alongside setuptools — without it pip falls
    # back to a legacy "setup.py install" for the ansible sdist, which can leave
    # the package broken (ansible-playbook then fails with ModuleNotFoundError).
    success = run_os_command(
        {
            "python3": "sudo -H python3 -m pip install --upgrade setuptools wheel cryptography ansible==2.8.5 pip"
        }
    )
    # Accept a pre-existing ansible binary even if the pip install failed.
    if not (success or shutil.which("ansible")):
        could_not_install("Ansible")
https://github.com/frappe/bench/issues/1059
Checking System Compatibility... ubuntu 18 is compatible! Hit:1 http://mirror.hetzner.de/ubuntu/packages bionic InRelease Hit:2 http://mirror.hetzner.de/ubuntu/packages bionic-updates InRelease Hit:3 http://mirror.hetzner.de/ubuntu/packages bionic-backports InRelease Hit:4 http://mirror.hetzner.de/ubuntu/packages bionic-security InRelease Get:5 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB] Hit:6 http://archive.ubuntu.com/ubuntu bionic InRelease Hit:7 http://archive.ubuntu.com/ubuntu bionic-updates InRelease Hit:8 http://archive.ubuntu.com/ubuntu bionic-backports InRelease Fetched 88.7 kB in 2s (59.1 kB/s) Reading package lists... Done Reading package lists... Done Building dependency tree Reading state information... Done build-essential is already the newest version (12.4ubuntu1). libffi-dev is already the newest version (3.2.1-8). python3-setuptools is already the newest version (39.0.1-2). git is already the newest version (1:2.17.1-1ubuntu0.7). python3-dev is already the newest version (3.6.7-1~18.04). 0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded. curl already installed! wget already installed! git already installed! pip3 already installed! 
Requirement already up-to-date: setuptools in /usr/local/lib/python3.6/dist-packages (50.0.0) Requirement already up-to-date: cryptography in /usr/local/lib/python3.6/dist-packages (3.1) Collecting ansible==2.8.5 Using cached ansible-2.8.5.tar.gz (14.4 MB) Requirement already up-to-date: pip in /usr/local/lib/python3.6/dist-packages (20.2.2) Requirement already satisfied, skipping upgrade: six>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from cryptography) (1.15.0) Requirement already satisfied, skipping upgrade: cffi!=1.11.3,>=1.8 in /usr/local/lib/python3.6/dist-packages (from cryptography) (1.14.2) Requirement already satisfied, skipping upgrade: jinja2 in /usr/local/lib/python3.6/dist-packages (from ansible==2.8.5) (2.11.2) Requirement already satisfied, skipping upgrade: PyYAML in /usr/lib/python3/dist-packages (from ansible==2.8.5) (3.12) Requirement already satisfied, skipping upgrade: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi!=1.11.3,>=1.8->cryptography) (2.20) Requirement already satisfied, skipping upgrade: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->ansible==2.8.5) (1.1.1) Using legacy 'setup.py install' for ansible, since package 'wheel' is not installed. Installing collected packages: ansible Running setup.py install for ansible ... 
done Successfully installed ansible Traceback (most recent call last): File "/usr/bin/ansible-playbook", line 32, in <module> from ansible import context ModuleNotFoundError: No module named 'ansible' Traceback (most recent call last): File "install.py", line 437, in <module> install_bench(args) File "install.py", line 211, in install_bench run_playbook('create_user.yml', extra_vars=extra_vars) File "install.py", line 362, in run_playbook success = subprocess.check_call(args, cwd=os.path.join(cwd, 'playbooks'), stdout=log_stream, stderr=sys.stderr) File "/usr/lib/python3.6/subprocess.py", line 311, in check_call raise CalledProcessError(retcode, cmd) subprocess.CalledProcessError: Command '['ansible-playbook', '-c', 'local', 'create_user.yml', '-vvvv', '-e', '@/tmp/extra_vars.json']' returned non-zero exit status 1.
ModuleNotFoundError
def update_checkpoint_dependencies(self, jobs=None):
    """Re-evaluate jobs that depend on checkpoint jobs and update the DAG.

    Args:
        jobs: iterable of jobs to inspect; defaults to all jobs that do not
            need to be run.

    Returns:
        bool: True if any depending job was replaced (postprocessing is then
        triggered as well).
    """
    self.update_checkpoint_outputs()
    if jobs is None:
        jobs = [j for j in self.jobs if not self.needrun(j)]

    touched = False
    for checkpoint_job in jobs:
        if not checkpoint_job.is_checkpoint:
            continue
        # Snapshot the dependents, since replace_job mutates the DAG.
        for dependent in list(self.depending[checkpoint_job]):
            logger.info("Updating job {}.".format(dependent))
            replacement = dependent.updated()
            self.replace_job(dependent, replacement, recursive=False)
            touched = True

    if touched:
        self.postprocess()
    return touched
def update_checkpoint_dependencies(self, jobs=None):
    """Update dependencies of checkpoints.

    Re-evaluates jobs that depend on checkpoint jobs, replacing them in the
    DAG, and runs postprocessing if anything changed.

    Args:
        jobs: iterable of jobs to inspect; defaults to all jobs that do not
            need to be run.

    Returns:
        bool: True if any depending job was replaced.
    """
    updated = False
    self.update_checkpoint_outputs()
    if jobs is None:
        jobs = [job for job in self.jobs if not self.needrun(job)]
    for job in jobs:
        if job.is_checkpoint:
            depending = list(self.depending[job])
            # re-evaluate depending jobs, replace and update DAG
            for j in depending:
                # Fix: do not call self.jobid(j) in the log message — a
                # depending job may not (or no longer) be registered in
                # self._jobid, which raised KeyError here. Log the job itself.
                logger.info("Updating job {}.".format(j))
                newjob = j.updated()
                self.replace_job(j, newjob, recursive=False)
                updated = True
    if updated:
        self.postprocess()
    return updated
https://github.com/snakemake/snakemake/issues/817
Traceback (most recent call last): File "/mnt/data0/jmeppley/projects/snakemake_bugs/snakemake.272/DTR-phage-pipeline/snakemake/snakemake/__init__.py", line 687, in snakemake success = workflow.execute( File "/mnt/data0/jmeppley/projects/snakemake_bugs/snakemake.272/DTR-phage-pipeline/snakemake/snakemake/workflow.py", line 699, in execute dag.update_checkpoint_dependencies() File "/mnt/data0/jmeppley/projects/snakemake_bugs/snakemake.272/DTR-phage-pipeline/snakemake/snakemake/dag.py", line 1300, in update_checkpoint_dependencies logger.info("Updating job {} ({}).".format(self.jobid(j), j)) File "/mnt/data0/jmeppley/projects/snakemake_bugs/snakemake.272/DTR-phage-pipeline/snakemake/snakemake/dag.py", line 704, in jobid return self._jobid[job] KeyError: align_dtr_seqs_to_polished_refs
KeyError
def __init__( self, workflow, rules=None, dryrun=False, targetfiles=None, targetrules=None, forceall=False, forcerules=None, forcefiles=None, priorityfiles=None, priorityrules=None, untilfiles=None, untilrules=None, omitfiles=None, omitrules=None, ignore_ambiguity=False, force_incomplete=False, ignore_incomplete=False, notemp=False, keep_remote_local=False, batch=None, ): self.dryrun = dryrun self.dependencies = defaultdict(partial(defaultdict, set)) self.depending = defaultdict(partial(defaultdict, set)) self._needrun = set() self._priority = dict() self._reason = defaultdict(Reason) self._finished = set() self._dynamic = set() self._len = 0 self.workflow = workflow self.rules = set(rules) self.ignore_ambiguity = ignore_ambiguity self.targetfiles = targetfiles self.targetrules = targetrules self.priorityfiles = priorityfiles self.priorityrules = priorityrules self.targetjobs = set() self.prioritytargetjobs = set() self._ready_jobs = set() self.notemp = notemp self.keep_remote_local = keep_remote_local self._jobid = dict() self.job_cache = dict() self.conda_envs = dict() self.container_imgs = dict() self._progress = 0 self._group = dict() self._n_until_ready = defaultdict(int) self._running = set() self.job_factory = JobFactory() self.group_job_factory = GroupJobFactory() self.forcerules = set() self.forcefiles = set() self.untilrules = set() self.untilfiles = set() self.omitrules = set() self.omitfiles = set() self.updated_subworkflow_files = set() if forceall: self.forcerules.update(self.rules) elif forcerules: self.forcerules.update(forcerules) if forcefiles: self.forcefiles.update(forcefiles) if untilrules: self.untilrules.update(set(rule.name for rule in untilrules)) if untilfiles: self.untilfiles.update(untilfiles) if omitrules: self.omitrules.update(set(rule.name for rule in omitrules)) if omitfiles: self.omitfiles.update(omitfiles) self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules) self.omitforce = set() self.batch = batch if batch is 
not None and not batch.is_final: # Since not all input files of a batching rule are considered, we cannot run # beyond that rule. # For the final batch, we do not need to omit anything. self.omitrules.add(batch.rulename) self.force_incomplete = force_incomplete self.ignore_incomplete = ignore_incomplete self.periodic_wildcard_detector = PeriodicityDetector() self.update_output_index()
def __init__( self, workflow, rules=None, dryrun=False, targetfiles=None, targetrules=None, forceall=False, forcerules=None, forcefiles=None, priorityfiles=None, priorityrules=None, untilfiles=None, untilrules=None, omitfiles=None, omitrules=None, ignore_ambiguity=False, force_incomplete=False, ignore_incomplete=False, notemp=False, keep_remote_local=False, batch=None, ): self.dryrun = dryrun self.dependencies = defaultdict(partial(defaultdict, set)) self.depending = defaultdict(partial(defaultdict, set)) self._needrun = set() self._priority = dict() self._reason = defaultdict(Reason) self._finished = set() self._dynamic = set() self._len = 0 self.workflow = workflow self.rules = set(rules) self.ignore_ambiguity = ignore_ambiguity self.targetfiles = targetfiles self.targetrules = targetrules self.priorityfiles = priorityfiles self.priorityrules = priorityrules self.targetjobs = set() self.prioritytargetjobs = set() self._ready_jobs = set() self.notemp = notemp self.keep_remote_local = keep_remote_local self._jobid = dict() self.job_cache = dict() self.conda_envs = dict() self.container_imgs = dict() self._progress = 0 self._group = dict() self._n_until_ready = defaultdict(int) self.job_factory = JobFactory() self.group_job_factory = GroupJobFactory() self.forcerules = set() self.forcefiles = set() self.untilrules = set() self.untilfiles = set() self.omitrules = set() self.omitfiles = set() self.updated_subworkflow_files = set() if forceall: self.forcerules.update(self.rules) elif forcerules: self.forcerules.update(forcerules) if forcefiles: self.forcefiles.update(forcefiles) if untilrules: self.untilrules.update(set(rule.name for rule in untilrules)) if untilfiles: self.untilfiles.update(untilfiles) if omitrules: self.omitrules.update(set(rule.name for rule in omitrules)) if omitfiles: self.omitfiles.update(omitfiles) self.has_dynamic_rules = any(rule.dynamic_output for rule in self.rules) self.omitforce = set() self.batch = batch if batch is not None and not 
batch.is_final: # Since not all input files of a batching rule are considered, we cannot run # beyond that rule. # For the final batch, we do not need to omit anything. self.omitrules.add(batch.rulename) self.force_incomplete = force_incomplete self.ignore_incomplete = ignore_incomplete self.periodic_wildcard_detector = PeriodicityDetector() self.update_output_index()
https://github.com/snakemake/snakemake/issues/806
Building DAG of jobs... Using shell: /usr/bin/bash Provided cores: 4 Rules claiming more threads will be scaled down. Job counts: count jobs 1 all 1 corpus 2 shard 4 [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.es jobid: 2 wildcards: lang=es Downstream jobs will be updated after completion. [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 1 (corpus). Updating job 0 (all). [Wed Dec 16 11:46:36 2020] Finished job 2. 1 of 4 steps (25%) done [Wed Dec 16 11:46:36 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 4 (corpus). Updating job 5 (all). [Wed Dec 16 11:47:00 2020] Finished job 3. 2 of 40 steps (5%) done Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/__init__.py", line 687, in snakemake success = workflow.execute( File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/workflow.py", line 1006, in execute success = scheduler.schedule() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 470, in schedule run = self.job_selector(needrun) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 764, in job_selector_greedy c = list(map(self.job_reward, jobs)) # job rewards File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 847, in job_reward input_size = job.inputsize File 
"/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in inputsize self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in <genexpr> self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 242, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 257, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 556, in size return self.size_local File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 561, in size_local self.check_broken_symlink() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 566, in check_broken_symlink if not self.exists_local and os.lstat(self.file): FileNotFoundError: [Errno 2] No such file or directory: 'data/corpus.txt' [Wed Dec 16 11:47:36 2020] Error in rule shard: jobid: 3 output: data/batches.en shell: for i in $(seq 0 $(( 2 % 5 + 1))); do # 0 .. 3 -> 4 for j in $(seq 0 $(( 1 % 2 + 1))); do # 0 .. 2 -> 3 mkdir -p data/en/$i/$j echo 'hello en' > data/en/$i/$j/text if [[ "en" == "es" ]]; then sleep 3 else sleep 5 fi done done ls -d data/en/*/* > data/batches.en (one of the commands exited with non-zero exit code; note that snakemake uses bash strict mode!) 
Removing output files of failed job shard since they might be corrupted: data/batches.en exception calling callback for <Future at 0x7f745430c3a0 state=finished raised RuleException> Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper run( File "/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 37, in __rule_shard checkpoints.shard.get(lang=trg_lang).output[0].open() as trg_f: File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/shell.py", line 202, in __new__ del cls._processes[jobid] KeyError: 3 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 560, in _callback raise ex File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 546, in cached_or_run run_func(*args) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2349, in run_wrapper raise RuleException( snakemake.exceptions.RuleException: KeyError in line 11 of /home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile: 3 File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper File 
"/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 11, in __rule_shard During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/concurrent/futures/_base.py", line 328, in _invoke_callbacks callback(self) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 572, in _callback error_callback(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 579, in _error self._handle_error(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 589, in _handle_error self.running.remove(job) KeyError: shard
FileNotFoundError
def update_ready(self, jobs=None): """Update information whether a job is ready to execute. Given jobs must be needrun jobs! """ if jobs is None: jobs = self.needrun_jobs potential_new_ready_jobs = False candidate_groups = set() for job in jobs: if job in self._ready_jobs or job in self._running: # job has been seen before or is running, no need to process again continue if not self.finished(job) and self._ready(job): potential_new_ready_jobs = True if job.group is None: self._ready_jobs.add(job) else: group = self._group[job] group.finalize() candidate_groups.add(group) self._ready_jobs.update( group for group in candidate_groups if all(self._ready(job) for job in group) ) return potential_new_ready_jobs
def update_ready(self, jobs=None): """Update information whether a job is ready to execute. Given jobs must be needrun jobs! """ if jobs is None: jobs = self.needrun_jobs potential_new_ready_jobs = False candidate_groups = set() for job in jobs: if job in self._ready_jobs: # job has been seen before, no need to process again continue if not self.finished(job) and self._ready(job): potential_new_ready_jobs = True if job.group is None: self._ready_jobs.add(job) else: group = self._group[job] group.finalize() candidate_groups.add(group) self._ready_jobs.update( group for group in candidate_groups if all(self._ready(job) for job in group) ) return potential_new_ready_jobs
https://github.com/snakemake/snakemake/issues/806
Building DAG of jobs... Using shell: /usr/bin/bash Provided cores: 4 Rules claiming more threads will be scaled down. Job counts: count jobs 1 all 1 corpus 2 shard 4 [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.es jobid: 2 wildcards: lang=es Downstream jobs will be updated after completion. [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 1 (corpus). Updating job 0 (all). [Wed Dec 16 11:46:36 2020] Finished job 2. 1 of 4 steps (25%) done [Wed Dec 16 11:46:36 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 4 (corpus). Updating job 5 (all). [Wed Dec 16 11:47:00 2020] Finished job 3. 2 of 40 steps (5%) done Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/__init__.py", line 687, in snakemake success = workflow.execute( File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/workflow.py", line 1006, in execute success = scheduler.schedule() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 470, in schedule run = self.job_selector(needrun) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 764, in job_selector_greedy c = list(map(self.job_reward, jobs)) # job rewards File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 847, in job_reward input_size = job.inputsize File 
"/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in inputsize self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in <genexpr> self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 242, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 257, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 556, in size return self.size_local File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 561, in size_local self.check_broken_symlink() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 566, in check_broken_symlink if not self.exists_local and os.lstat(self.file): FileNotFoundError: [Errno 2] No such file or directory: 'data/corpus.txt' [Wed Dec 16 11:47:36 2020] Error in rule shard: jobid: 3 output: data/batches.en shell: for i in $(seq 0 $(( 2 % 5 + 1))); do # 0 .. 3 -> 4 for j in $(seq 0 $(( 1 % 2 + 1))); do # 0 .. 2 -> 3 mkdir -p data/en/$i/$j echo 'hello en' > data/en/$i/$j/text if [[ "en" == "es" ]]; then sleep 3 else sleep 5 fi done done ls -d data/en/*/* > data/batches.en (one of the commands exited with non-zero exit code; note that snakemake uses bash strict mode!) 
Removing output files of failed job shard since they might be corrupted: data/batches.en exception calling callback for <Future at 0x7f745430c3a0 state=finished raised RuleException> Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper run( File "/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 37, in __rule_shard checkpoints.shard.get(lang=trg_lang).output[0].open() as trg_f: File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/shell.py", line 202, in __new__ del cls._processes[jobid] KeyError: 3 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 560, in _callback raise ex File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 546, in cached_or_run run_func(*args) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2349, in run_wrapper raise RuleException( snakemake.exceptions.RuleException: KeyError in line 11 of /home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile: 3 File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper File 
"/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 11, in __rule_shard During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/concurrent/futures/_base.py", line 328, in _invoke_callbacks callback(self) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 572, in _callback error_callback(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 579, in _error self._handle_error(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 589, in _handle_error self.running.remove(job) KeyError: shard
FileNotFoundError
def register_running(self, jobs): self._running.update(jobs) self._ready_jobs -= jobs for job in jobs: try: del self._n_until_ready[job] except KeyError: # already gone pass
def register_running(self, jobs): self._ready_jobs -= jobs for job in jobs: try: del self._n_until_ready[job] except KeyError: # already gone pass
https://github.com/snakemake/snakemake/issues/806
Building DAG of jobs... Using shell: /usr/bin/bash Provided cores: 4 Rules claiming more threads will be scaled down. Job counts: count jobs 1 all 1 corpus 2 shard 4 [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.es jobid: 2 wildcards: lang=es Downstream jobs will be updated after completion. [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 1 (corpus). Updating job 0 (all). [Wed Dec 16 11:46:36 2020] Finished job 2. 1 of 4 steps (25%) done [Wed Dec 16 11:46:36 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 4 (corpus). Updating job 5 (all). [Wed Dec 16 11:47:00 2020] Finished job 3. 2 of 40 steps (5%) done Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/__init__.py", line 687, in snakemake success = workflow.execute( File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/workflow.py", line 1006, in execute success = scheduler.schedule() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 470, in schedule run = self.job_selector(needrun) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 764, in job_selector_greedy c = list(map(self.job_reward, jobs)) # job rewards File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 847, in job_reward input_size = job.inputsize File 
"/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in inputsize self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in <genexpr> self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 242, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 257, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 556, in size return self.size_local File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 561, in size_local self.check_broken_symlink() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 566, in check_broken_symlink if not self.exists_local and os.lstat(self.file): FileNotFoundError: [Errno 2] No such file or directory: 'data/corpus.txt' [Wed Dec 16 11:47:36 2020] Error in rule shard: jobid: 3 output: data/batches.en shell: for i in $(seq 0 $(( 2 % 5 + 1))); do # 0 .. 3 -> 4 for j in $(seq 0 $(( 1 % 2 + 1))); do # 0 .. 2 -> 3 mkdir -p data/en/$i/$j echo 'hello en' > data/en/$i/$j/text if [[ "en" == "es" ]]; then sleep 3 else sleep 5 fi done done ls -d data/en/*/* > data/batches.en (one of the commands exited with non-zero exit code; note that snakemake uses bash strict mode!) 
Removing output files of failed job shard since they might be corrupted: data/batches.en exception calling callback for <Future at 0x7f745430c3a0 state=finished raised RuleException> Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper run( File "/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 37, in __rule_shard checkpoints.shard.get(lang=trg_lang).output[0].open() as trg_f: File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/shell.py", line 202, in __new__ del cls._processes[jobid] KeyError: 3 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 560, in _callback raise ex File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 546, in cached_or_run run_func(*args) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2349, in run_wrapper raise RuleException( snakemake.exceptions.RuleException: KeyError in line 11 of /home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile: 3 File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper File 
"/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 11, in __rule_shard During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/concurrent/futures/_base.py", line 328, in _invoke_callbacks callback(self) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 572, in _callback error_callback(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 579, in _error self._handle_error(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 589, in _handle_error self.running.remove(job) KeyError: shard
FileNotFoundError
def finish(self, job, update_dynamic=True): """Finish a given job (e.g. remove from ready jobs, mark depending jobs as ready).""" self._running.remove(job) # turn off this job's Reason self.reason(job).mark_finished() try: self._ready_jobs.remove(job) except KeyError: pass if job.is_group(): jobs = job else: jobs = [job] self._finished.update(jobs) updated_dag = False if update_dynamic: updated_dag = self.update_checkpoint_dependencies(jobs) depending = [ j for job in jobs for j in self.depending[job] if not self.in_until(job) and self.needrun(j) ] if not updated_dag: # Mark depending jobs as ready. # Skip jobs that are marked as until jobs. # This is not necessary if the DAG has been fully updated above. for job in depending: self._n_until_ready[job] -= 1 potential_new_ready_jobs = self.update_ready(depending) for job in jobs: if update_dynamic and job.dynamic_output: logger.info("Dynamically updating jobs") newjob = self.update_dynamic(job) if newjob: # simulate that this job ran and was finished before self.omitforce.add(newjob) self._needrun.add(newjob) self._finished.add(newjob) updated_dag = True self.postprocess() self.handle_protected(newjob) self.handle_touch(newjob) if updated_dag: # We might have new jobs, so we need to ensure that all conda envs # and singularity images are set up. if self.workflow.use_singularity: self.pull_container_imgs() if self.workflow.use_conda: self.create_conda_envs() potential_new_ready_jobs = True return potential_new_ready_jobs
def finish(self, job, update_dynamic=True): """Finish a given job (e.g. remove from ready jobs, mark depending jobs as ready).""" # turn off this job's Reason self.reason(job).mark_finished() try: self._ready_jobs.remove(job) except KeyError: pass if job.is_group(): jobs = job else: jobs = [job] self._finished.update(jobs) updated_dag = False if update_dynamic: updated_dag = self.update_checkpoint_dependencies(jobs) # mark depending jobs as ready # skip jobs that are marked as until jobs depending = [ j for job in jobs for j in self.depending[job] if not self.in_until(job) and self.needrun(j) ] for job in depending: self._n_until_ready[job] -= 1 potential_new_ready_jobs = self.update_ready(depending) for job in jobs: if update_dynamic and job.dynamic_output: logger.info("Dynamically updating jobs") newjob = self.update_dynamic(job) if newjob: # simulate that this job ran and was finished before self.omitforce.add(newjob) self._needrun.add(newjob) self._finished.add(newjob) updated_dag = True self.postprocess() self.handle_protected(newjob) self.handle_touch(newjob) if updated_dag: # We might have new jobs, so we need to ensure that all conda envs # and singularity images are set up. if self.workflow.use_singularity: self.pull_container_imgs() if self.workflow.use_conda: self.create_conda_envs() potential_new_ready_jobs = True return potential_new_ready_jobs
https://github.com/snakemake/snakemake/issues/806
Building DAG of jobs... Using shell: /usr/bin/bash Provided cores: 4 Rules claiming more threads will be scaled down. Job counts: count jobs 1 all 1 corpus 2 shard 4 [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.es jobid: 2 wildcards: lang=es Downstream jobs will be updated after completion. [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 1 (corpus). Updating job 0 (all). [Wed Dec 16 11:46:36 2020] Finished job 2. 1 of 4 steps (25%) done [Wed Dec 16 11:46:36 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 4 (corpus). Updating job 5 (all). [Wed Dec 16 11:47:00 2020] Finished job 3. 2 of 40 steps (5%) done Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/__init__.py", line 687, in snakemake success = workflow.execute( File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/workflow.py", line 1006, in execute success = scheduler.schedule() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 470, in schedule run = self.job_selector(needrun) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 764, in job_selector_greedy c = list(map(self.job_reward, jobs)) # job rewards File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 847, in job_reward input_size = job.inputsize File 
"/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in inputsize self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in <genexpr> self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 242, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 257, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 556, in size return self.size_local File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 561, in size_local self.check_broken_symlink() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 566, in check_broken_symlink if not self.exists_local and os.lstat(self.file): FileNotFoundError: [Errno 2] No such file or directory: 'data/corpus.txt' [Wed Dec 16 11:47:36 2020] Error in rule shard: jobid: 3 output: data/batches.en shell: for i in $(seq 0 $(( 2 % 5 + 1))); do # 0 .. 3 -> 4 for j in $(seq 0 $(( 1 % 2 + 1))); do # 0 .. 2 -> 3 mkdir -p data/en/$i/$j echo 'hello en' > data/en/$i/$j/text if [[ "en" == "es" ]]; then sleep 3 else sleep 5 fi done done ls -d data/en/*/* > data/batches.en (one of the commands exited with non-zero exit code; note that snakemake uses bash strict mode!) 
Removing output files of failed job shard since they might be corrupted: data/batches.en exception calling callback for <Future at 0x7f745430c3a0 state=finished raised RuleException> Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper run( File "/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 37, in __rule_shard checkpoints.shard.get(lang=trg_lang).output[0].open() as trg_f: File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/shell.py", line 202, in __new__ del cls._processes[jobid] KeyError: 3 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 560, in _callback raise ex File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 546, in cached_or_run run_func(*args) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2349, in run_wrapper raise RuleException( snakemake.exceptions.RuleException: KeyError in line 11 of /home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile: 3 File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper File 
"/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 11, in __rule_shard During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/concurrent/futures/_base.py", line 328, in _invoke_callbacks callback(self) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 572, in _callback error_callback(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 579, in _error self._handle_error(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 589, in _handle_error self.running.remove(job) KeyError: shard
FileNotFoundError
def replace_job(self, job, newjob, recursive=True):
    """Replace given job with new job.

    Transfers target-job membership, finished state, the job id and all
    incoming dependency edges from ``job`` to ``newjob``.
    """
    add_to_targetjobs = job in self.targetjobs
    # Capture the stable job id before the old job is deleted, so the
    # replacement can be re-registered under the same id below.
    jobid = self.jobid(job)
    # Snapshot the jobs depending on the old job; their edges are rewired
    # to the replacement at the end.
    depending = list(self.depending[job].items())
    if self.finished(job):
        self._finished.add(newjob)

    self.delete_job(job, recursive=recursive)

    # Re-register the replacement under the original job id.
    self._jobid[newjob] = jobid

    if add_to_targetjobs:
        self.targetjobs.add(newjob)

    self.cache_job(newjob)

    self.update([newjob])

    logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
    for job_, files in depending:
        # if not job_.dynamic_input:
        logger.debug("updating depending job {}".format(job_))
        self.dependencies[job_][newjob].update(files)
        self.depending[newjob][job_].update(files)
def replace_job(self, job, newjob, recursive=True):
    """Replace given job with new job.

    Transfers target-job membership, finished state, the job id and all
    incoming dependency edges from ``job`` to ``newjob``.
    """
    add_to_targetjobs = job in self.targetjobs
    # Capture the stable job id before the old job is deleted; otherwise the
    # replacement would be assigned a fresh id and anything still referring
    # to the original id (scheduler bookkeeping, logging) would break.
    jobid = self.jobid(job)
    depending = list(self.depending[job].items())
    if self.finished(job):
        self._finished.add(newjob)

    self.delete_job(job, recursive=recursive)

    # Re-register the replacement under the original job id.
    self._jobid[newjob] = jobid

    if add_to_targetjobs:
        self.targetjobs.add(newjob)

    self.cache_job(newjob)

    self.update([newjob])

    logger.debug("Replace {} with dynamic branch {}".format(job, newjob))
    for job_, files in depending:
        # if not job_.dynamic_input:
        logger.debug("updating depending job {}".format(job_))
        self.dependencies[job_][newjob].update(files)
        self.depending[newjob][job_].update(files)
https://github.com/snakemake/snakemake/issues/806
Building DAG of jobs... Using shell: /usr/bin/bash Provided cores: 4 Rules claiming more threads will be scaled down. Job counts: count jobs 1 all 1 corpus 2 shard 4 [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.es jobid: 2 wildcards: lang=es Downstream jobs will be updated after completion. [Wed Dec 16 11:46:00 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 1 (corpus). Updating job 0 (all). [Wed Dec 16 11:46:36 2020] Finished job 2. 1 of 4 steps (25%) done [Wed Dec 16 11:46:36 2020] checkpoint shard: output: data/batches.en jobid: 3 wildcards: lang=en Downstream jobs will be updated after completion. Updating job 4 (corpus). Updating job 5 (all). [Wed Dec 16 11:47:00 2020] Finished job 3. 2 of 40 steps (5%) done Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/__init__.py", line 687, in snakemake success = workflow.execute( File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/workflow.py", line 1006, in execute success = scheduler.schedule() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 470, in schedule run = self.job_selector(needrun) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 764, in job_selector_greedy c = list(map(self.job_reward, jobs)) # job rewards File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 847, in job_reward input_size = job.inputsize File 
"/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in inputsize self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/jobs.py", line 378, in <genexpr> self._inputsize = sum(f.size for f in self.input) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 242, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 257, in wrapper return func(self, *args, **kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 556, in size return self.size_local File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 561, in size_local self.check_broken_symlink() File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/io.py", line 566, in check_broken_symlink if not self.exists_local and os.lstat(self.file): FileNotFoundError: [Errno 2] No such file or directory: 'data/corpus.txt' [Wed Dec 16 11:47:36 2020] Error in rule shard: jobid: 3 output: data/batches.en shell: for i in $(seq 0 $(( 2 % 5 + 1))); do # 0 .. 3 -> 4 for j in $(seq 0 $(( 1 % 2 + 1))); do # 0 .. 2 -> 3 mkdir -p data/en/$i/$j echo 'hello en' > data/en/$i/$j/text if [[ "en" == "es" ]]; then sleep 3 else sleep 5 fi done done ls -d data/en/*/* > data/batches.en (one of the commands exited with non-zero exit code; note that snakemake uses bash strict mode!) 
Removing output files of failed job shard since they might be corrupted: data/batches.en exception calling callback for <Future at 0x7f745430c3a0 state=finished raised RuleException> Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper run( File "/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 37, in __rule_shard checkpoints.shard.get(lang=trg_lang).output[0].open() as trg_f: File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/shell.py", line 202, in __new__ del cls._processes[jobid] KeyError: 3 During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 560, in _callback raise ex File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 546, in cached_or_run run_func(*args) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2349, in run_wrapper raise RuleException( snakemake.exceptions.RuleException: KeyError in line 11 of /home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile: 3 File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 2318, in run_wrapper File 
"/home/cristian/Documentos/bitextor/files/tmp/min_snakemake_5_30_2/Snakefile", line 11, in __rule_shard During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.8/concurrent/futures/_base.py", line 328, in _invoke_callbacks callback(self) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/executors/__init__.py", line 572, in _callback error_callback(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 579, in _error self._handle_error(job) File "/home/cristian/Documentos/bitextor/files/tmp/env_snake_performance_snakemake_fix_checkpoints/lib/python3.8/site-packages/snakemake/scheduler.py", line 589, in _handle_error self.running.remove(job) KeyError: shard
FileNotFoundError
def finished(self, job, keep_metadata=True):
    """Persist provenance metadata for each output of the finished ``job``
    and clear the corresponding incomplete markers.
    """
    if not keep_metadata:
        # Caller only wants the incomplete markers removed.
        for f in job.expanded_output:
            self._delete_record(self._incomplete_path, f)
        return

    version = str(job.rule.version) if job.rule.version is not None else None
    code = self._code(job.rule)
    input = self._input(job)
    log = self._log(job)
    params = self._params(job)
    shellcmd = job.shellcmd
    conda_env = self._conda_env(job)
    # Used as endtime when an output file does not exist at this point.
    fallback_time = time.time()
    for f in job.expanded_output:
        rec_path = self._record_path(self._incomplete_path, f)
        # The incomplete marker's mtime doubles as the job's start time.
        starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
        # Sometimes finished is called twice, if so, lookup the previous starttime
        if not os.path.exists(rec_path):
            starttime = self._read_record(self._metadata_path, f).get(
                "starttime", None
            )
        endtime = f.mtime.local_or_remote() if f.exists else fallback_time
        self._record(
            self._metadata_path,
            {
                "version": version,
                "code": code,
                "rule": job.rule.name,
                "input": input,
                "log": log,
                "params": params,
                "shellcmd": shellcmd,
                "incomplete": False,
                "starttime": starttime,
                "endtime": endtime,
                "job_hash": hash(job),
                "conda_env": conda_env,
                "container_img_url": job.container_img_url,
            },
            f,
        )
        self._delete_record(self._incomplete_path, f)
def finished(self, job, keep_metadata=True):
    """Persist provenance metadata for each output of the finished ``job``
    and clear the corresponding incomplete markers.
    """
    if not keep_metadata:
        # Caller only wants the incomplete markers removed.
        for f in job.expanded_output:
            self._delete_record(self._incomplete_path, f)
        return

    version = str(job.rule.version) if job.rule.version is not None else None
    code = self._code(job.rule)
    input = self._input(job)
    log = self._log(job)
    params = self._params(job)
    shellcmd = job.shellcmd
    conda_env = self._conda_env(job)
    # Used as endtime when an output file does not exist at this point.
    fallback_time = time.time()
    for f in job.expanded_output:
        rec_path = self._record_path(self._incomplete_path, f)
        # The incomplete marker's mtime doubles as the job's start time.
        starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
        # finished() can be invoked twice for the same job, in which case the
        # incomplete marker is already gone. Fall back to the starttime of the
        # previously written metadata record instead of persisting None (a
        # None starttime later breaks report generation, which computes
        # min(starttime, ...) over it).
        if not os.path.exists(rec_path):
            starttime = self._read_record(self._metadata_path, f).get(
                "starttime", None
            )
        endtime = f.mtime.local_or_remote() if f.exists else fallback_time
        self._record(
            self._metadata_path,
            {
                "version": version,
                "code": code,
                "rule": job.rule.name,
                "input": input,
                "log": log,
                "params": params,
                "shellcmd": shellcmd,
                "incomplete": False,
                "starttime": starttime,
                "endtime": endtime,
                "job_hash": hash(job),
                "conda_env": conda_env,
                "container_img_url": job.container_img_url,
            },
            f,
        )
        self._delete_record(self._incomplete_path, f)
https://github.com/snakemake/snakemake/issues/741
user@:/analysis$ snakemake --cores 1 -pr --report test.html all Building DAG of jobs... Creating report... Adding shell_rule.txt (0 MB). Adding run_rule.txt (0 MB). Traceback (most recent call last): File "/usr/local/lib/python3.8/dist-packages/snakemake/__init__.py", line 668, in snakemake success = workflow.execute( File "/usr/local/lib/python3.8/dist-packages/snakemake/workflow.py", line 816, in execute auto_report(dag, report, stylesheet=report_stylesheet) File "/usr/local/lib/python3.8/dist-packages/snakemake/report/__init__.py", line 758, in auto_report rec.starttime = min(rec.starttime, meta["starttime"]) TypeError: '<' not supported between instances of 'NoneType' and 'int'
TypeError
def outputs_older_than_script_or_notebook(self):
    """return output that's older than script, i.e. script has changed"""
    path = self.rule.script or self.rule.notebook
    if not path:
        # Rule has neither a script nor a notebook; nothing can be outdated.
        return
    if self.rule.basedir:
        # needed if rule is included from another subdirectory
        path = os.path.relpath(os.path.join(self.rule.basedir, path))
    assert os.path.exists(path), "cannot find {0}".format(path)
    script_mtime = os.lstat(path).st_mtime
    for f in self.expanded_output:
        # Only outputs that already exist can be compared against the script.
        if f.exists:
            if not f.is_newer(script_mtime):
                yield f
def outputs_older_than_script_or_notebook(self):
    """return output that's older than script, i.e. script has changed"""
    path = self.rule.script or self.rule.notebook
    if not path:
        # Rule has neither a script nor a notebook; nothing can be outdated.
        return
    if self.rule.basedir:
        # needed if rule is included from another subdirectory: the script
        # path is relative to the including Snakefile, not to the cwd.
        path = os.path.relpath(os.path.join(self.rule.basedir, path))
    # Informative assertion message instead of a bare AssertionError.
    assert os.path.exists(path), "cannot find {0}".format(path)
    script_mtime = os.lstat(path).st_mtime
    for f in self.expanded_output:
        # Only outputs that already exist can be compared against the script.
        if f.exists:
            if not f.is_newer(script_mtime):
                yield f
https://github.com/snakemake/snakemake/issues/417
$ snakemake --list-code-changes Building DAG of jobs... Traceback (most recent call last): File "/Users/haje01/.pyenv/versions/3.7.2/envs/ml2/lib/python3.7/site-packages/snakemake/__init__.py", line 654, in snakemake keepincomplete=keep_incomplete, File "/Users/haje01/.pyenv/versions/3.7.2/envs/ml2/lib/python3.7/site-packages/snakemake/workflow.py", line 764, in execute items.extend(list(j.outputs_older_than_script_or_notebook())) File "/Users/haje01/.pyenv/versions/3.7.2/envs/ml2/lib/python3.7/site-packages/snakemake/jobs.py", line 208, in outputs_older_than_script_or_notebook assert os.path.exists(path) # to make sure lstat works AssertionError
AssertionError
def __init__(self, max_wait_time, nthreads=8):
    """Set up empty per-file caches and start the inventory workers."""
    # Per-file lookup tables.
    self.mtime = dict()
    # filled in case of symlink with mtime of target
    self.mtime_target = PassthroughDict(self.mtime)
    self.size = dict()
    self.exists_local = dict()
    self.exists_remote = dict()
    # Indicator whether an inventory has been created for the root of a given IOFile.
    # In case of remote objects the root is the bucket or server host.
    self.has_inventory = set()
    # Budget for waiting on filesystem latency.
    self.max_wait_time = max_wait_time
    self.remaining_wait_time = max_wait_time
    # Worker infrastructure; activate() below brings the threads up.
    self.queue = queue.Queue()
    self.threads = []
    self.active = False
    self.nthreads = nthreads
    self.activate()
def __init__(self, max_wait_time):
    """Initialize an active cache with empty per-file lookup tables."""
    # Per-file lookup tables.
    self.mtime = dict()
    self.size = dict()
    # Existence caches (ExistsDict keeps a back-reference to this cache).
    self.exists_local = ExistsDict(self)
    self.exists_remote = ExistsDict(self)
    # Indicator whether an inventory has been created for the root of a given IOFile.
    # In case of remote objects the root is the bucket or server host.
    self.has_inventory = set()
    self.active = True
    # Budget for waiting on filesystem latency.
    self.remaining_wait_time = max_wait_time
    self.max_wait_time = max_wait_time
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def needs_inventory(self, path):
    """Return whether ``path`` still has to be inventoried.

    Falsy paths are returned unchanged (they can never be inventoried).
    """
    if not path:
        return path
    return path not in self.has_inventory
def needs_inventory(self, path):
    """Return whether the inventory root of ``path`` still has to be inventoried.

    Falsy roots are returned unchanged (they can never be inventoried).
    """
    root = self.get_inventory_root(path)
    if not root:
        return root
    return root not in self.has_inventory
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def in_inventory(self, path):
    """Return whether ``path`` has already been inventoried.

    Falsy paths are returned unchanged.
    """
    if not path:
        return path
    return path in self.has_inventory
def in_inventory(self, path):
    """Return whether the inventory root of ``path`` has already been inventoried.

    Falsy roots are returned unchanged.
    """
    root = self.get_inventory_root(path)
    if not root:
        return root
    return root in self.has_inventory
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def clear(self):
    """Drop every cached entry and reset the wait-time budget."""
    tables = (
        self.mtime,
        self.mtime_target,
        self.size,
        self.exists_local,
        self.exists_remote,
        self.has_inventory,
    )
    for table in tables:
        table.clear()
    self.remaining_wait_time = self.max_wait_time
def clear(self):
    """Drop every cached entry and reset the wait-time budget."""
    tables = (
        self.mtime,
        self.size,
        self.exists_local,
        self.exists_remote,
        self.has_inventory,
    )
    for table in tables:
        table.clear()
    self.remaining_wait_time = self.max_wait_time
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def deactivate(self):
    """Stop inventory processing: join all workers, then flush the caches."""
    # Flag workers to stop before waiting for them.
    self.active = False
    for worker in list(self.threads):
        worker.join()
    self.threads = []
    self.clear()
def deactivate(self):
    """Flush all cached state, then mark the cache as inactive."""
    # Drop cached entries first so nothing stale survives a reactivation.
    self.clear()
    self.active = False
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def __new__(cls, file):
    """Construct the IOFile string wrapper around ``file``."""
    obj = str.__new__(cls, file)
    # Flag callables (including AnnotatedStrings marked callable) so they
    # are distinguishable from literal paths.
    flagged = isfunction(file) or ismethod(file)
    if not flagged:
        flagged = isinstance(file, AnnotatedString) and bool(file.callable)
    obj._is_function = flagged
    obj._file = file
    obj.rule = None
    obj._regex = None
    if obj.is_remote:
        # Give the remote backend a back-reference to this IOFile.
        obj.remote_object._iofile = obj
    # Precompute the paths used by the inventory cache.
    obj.set_inventory_paths()
    return obj
def __new__(cls, file):
    """Construct the IOFile string wrapper around ``file``."""
    obj = str.__new__(cls, file)
    # Flag callables (including AnnotatedStrings marked callable) so they
    # are distinguishable from literal paths.
    flagged = isfunction(file) or ismethod(file)
    if not flagged:
        flagged = isinstance(file, AnnotatedString) and bool(file.callable)
    obj._is_function = flagged
    obj._file = file
    obj.rule = None
    obj._regex = None
    if obj.is_remote:
        # Give the remote backend a back-reference to this IOFile.
        obj.remote_object._iofile = obj
    return obj
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def iocache(raise_error=False):
    """Decorator factory caching IOFile property results in the workflow's IOCache.

    With ``raise_error=True``, a cached "does not exist" result is reported by
    raising FileNotFoundError (mirroring the uncached behavior) instead of
    returning False.
    """

    def inner_iocache(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            iocache = self.rule.workflow.iocache
            if not iocache.active or self.inventory_path is None:
                # Cache disabled or file not inventoriable: compute directly.
                return func(self, *args, **kwargs)
            # Each cached property has its own mapping on the IOCache object.
            cache = getattr(iocache, func.__name__)
            # first check if file is present in cache
            if self.inventory_path in cache:
                res = cache[self.inventory_path]
            # check if the folder was cached
            elif iocache.in_inventory(self.inventory_root):
                # as the folder was cached, we do know that the file does not exist
                if raise_error:
                    # make sure that the cache behaves the same as non-cached results
                    raise FileNotFoundError(
                        "No such file or directory: {}".format(self.file)
                    )
                else:
                    return False
            elif self._is_function:
                raise ValueError(
                    "This IOFile is specified as a function and "
                    "may not be used directly."
                )
            else:
                # Sentinel: value must still be determined below.
                res = IOCACHE_DEFERRED

            if res is IOCACHE_DEFERRED:
                # determine values that are not yet cached
                self._add_to_inventory(func.__name__)
                res = cache[self.inventory_path]

            # makes sure that cache behaves same as non-cached results
            if res is IOCACHE_BROKENSYMLINK:
                raise WorkflowError(
                    "File {} seems to be a broken symlink.".format(self.file)
                )
            elif res is IOCACHE_NOTEXIST:
                raise FileNotFoundError("File {} does not exist.".format(self.file))
            elif res is IOCACHE_NOPERMISSION:
                raise PermissionError(
                    "File {} does not have the required permissions.".format(self.file)
                )

            return res

        return wrapper

    return inner_iocache
def iocache(func):
    """Memoize ``func``'s per-file result in the workflow-level IOCache.

    Cache keys are the file path without trailing slashes; when the cache is
    inactive, ``func`` is called directly.
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        iocache = self.rule.workflow.iocache
        if not iocache.active:
            return func(self, *args, **kwargs)
        # Each cached property has its own mapping on the IOCache object.
        cache = getattr(iocache, func.__name__)
        key = self.rstrip("/")
        if key in cache:
            return cache[key]
        result = func(self, *args, **kwargs)
        cache[key] = result
        return result

    return wrapper
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def wrapper(self, *args, **kwargs):
    # Serve the wrapped IOFile property from the workflow IOCache, falling
    # back to direct computation when caching is unavailable for this file.
    iocache = self.rule.workflow.iocache
    if not iocache.active or self.inventory_path is None:
        return func(self, *args, **kwargs)
    # Each cached property has its own mapping on the IOCache object.
    cache = getattr(iocache, func.__name__)
    # first check if file is present in cache
    if self.inventory_path in cache:
        res = cache[self.inventory_path]
    # check if the folder was cached
    elif iocache.in_inventory(self.inventory_root):
        # as the folder was cached, we do know that the file does not exist
        if raise_error:
            # make sure that the cache behaves the same as non-cached results
            raise FileNotFoundError("No such file or directory: {}".format(self.file))
        else:
            return False
    elif self._is_function:
        raise ValueError(
            "This IOFile is specified as a function and may not be used directly."
        )
    else:
        # Sentinel: value must still be determined below.
        res = IOCACHE_DEFERRED
    if res is IOCACHE_DEFERRED:
        # determine values that are not yet cached
        self._add_to_inventory(func.__name__)
        res = cache[self.inventory_path]
    # makes sure that cache behaves same as non-cached results
    if res is IOCACHE_BROKENSYMLINK:
        raise WorkflowError("File {} seems to be a broken symlink.".format(self.file))
    elif res is IOCACHE_NOTEXIST:
        raise FileNotFoundError("File {} does not exist.".format(self.file))
    elif res is IOCACHE_NOPERMISSION:
        raise PermissionError(
            "File {} does not have the required permissions.".format(self.file)
        )
    return res
def wrapper(self, *args, **kwargs):
    # Serve ``func``'s result from the workflow IOCache when it is active;
    # otherwise compute it directly.
    iocache = self.rule.workflow.iocache
    if not iocache.active:
        return func(self, *args, **kwargs)
    cache = getattr(iocache, func.__name__)
    key = self.rstrip("/")
    if key in cache:
        return cache[key]
    result = func(self, *args, **kwargs)
    cache[key] = result
    return result
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def inventory(self):
    """Starting from the given file, try to cache as much existence and
    modification date information of this and other files as possible.

    Does nothing when the cache is inactive, the file has no inventory
    path, or the relevant root has already been inventorized.
    """
    iocache = self.rule.workflow.iocache
    if not iocache.active:
        return
    if self.inventory_path is None:
        return
    if not iocache.needs_inventory(self.inventory_root):
        return
    # info not yet in inventory, let's discover as much as we can
    if self.is_remote:
        self.remote_object.inventory(iocache)
    else:
        self._local_inventory(iocache)
def inventory(self):
    """Starting from the given file, try to cache as much existence and
    modification date information of this and other files as possible.
    """
    iocache = self.rule.workflow.iocache
    if not (iocache.active and iocache.needs_inventory(self)):
        return
    if self.is_remote:
        # info not yet in inventory, let's discover as much as we can
        self.remote_object.inventory(iocache)
    elif not ON_WINDOWS:
        # we don't want to mess with different path representations on windows
        self._local_inventory(iocache)
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def _local_inventory(self, cache): # for local files, perform BFS via os.scandir to determine existence of files # obtaining mtime and size of the files is deferred for parallel execution # as this can be a slow step on network filesystems path = self.inventory_root if not os.path.exists(path): cache.has_inventory.add(path) return start = time.time() logger.debug("Inventory started of {}".format(path)) pbuffer = [] counter = 0 try: for entry in os.scandir(path): is_file = self._local_inventory_direntry_quick(cache, entry) if is_file is True: counter += 1 pbuffer.append(entry.path) if len(pbuffer) > 100: cache.submit(self._local_inventory_path_complete, pbuffer) pbuffer = [] if pbuffer: cache.submit(self._local_inventory_path_complete, pbuffer) cache.has_inventory.add(path) logger.debug( "Inventory of {} completed in {:.1f} seconds. {} files added to stat queue ({} tasks in queue).".format( path, time.time() - start, counter, cache.queue.qsize() ) ) except OSError as e: logger.debug( "Inventory of {} failed. Continuing without inventory caching. Error message: {}.".format( path, str(e) ) )
def _local_inventory(self, cache): # for local files, perform BFS via os.scandir to determine existence of files if cache.remaining_wait_time <= 0: # No more time to create inventory. return start_time = time.time() root = cache.get_inventory_root(self) if root == self: # there is no root directory that could be used return if os.path.exists(root): queue = [root] while queue: path = queue.pop(0) # path must be a dir cache.exists_local[path] = True with os.scandir(path) as scan: for entry in scan: if entry.is_dir(): queue.append(entry.path) else: # path is a file cache.exists_local[entry.path] = True cache.remaining_wait_time -= time.time() - start_time if cache.remaining_wait_time <= 0: # Stop, do not mark inventory as done below. # Otherwise, we would falsely assume that those files # are not present. return cache.has_inventory.add(root)
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def update_remote_filepath(self):
    """Re-point the remote object at this IOFile when their file strings
    diverge (as happens after wildcard expansion), and refresh the
    inventory paths accordingly.
    """
    remote = self.remote_object
    if remote._file == self._file:
        return
    remote._iofile = self
    self.set_inventory_paths()
def update_remote_filepath(self):
    """Re-point the remote object at this IOFile when their file strings
    diverge (as happens after wildcard expansion)."""
    remote = self.remote_object
    if remote._file == self._file:
        return
    remote._iofile = self
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def mtime_local(self):
    """Return the local modification time, without following symlinks.

    For directory outputs, the hidden timestamp marker file inside the
    directory (if present) is authoritative, since a directory's own mtime
    changes whenever its contents change.
    """
    link_stat = os.lstat(self.file)
    if not stat.S_ISDIR(link_stat.st_mode):
        return link_stat.st_mtime
    marker = os.path.join(self.file, TIMESTAMP_FILENAME)
    if os.path.exists(marker):
        return os.lstat(marker).st_mtime
    return link_stat.st_mtime
def mtime_local(self):
    """Return the local modification time, without following symlinks.

    For directory outputs, the ``.snakemake_timestamp`` marker file inside
    the directory (if present) is authoritative, since a directory's own
    mtime changes whenever its contents change.
    """
    # A single lstat replaces the former os.path.isdir + os.lstat pair:
    # os.path.isdir follows symlinks, contradicting the stated intent of
    # not following them, and the extra stat call was redundant.
    link_stat = os.lstat(self.file)
    if stat.S_ISDIR(link_stat.st_mode):
        marker = os.path.join(self.file, ".snakemake_timestamp")
        if os.path.exists(marker):
            return os.lstat(marker).st_mtime
    return link_stat.st_mtime
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def is_newer(self, time):
    """Return True if the file is newer than ``time``, or if it is a
    symlink whose target is newer than ``time``.
    """
    if self.is_ancient:
        # ancient() files never count as outdated
        return False
    if self.is_remote:
        return self.mtime > time
    return self.mtime > time or self.mtime_target > time
def is_newer(self, time):
    """Returns true of the file is newer than time, or if it is a symlink
    that points to a file newer than time."""
    if self.is_ancient:
        return False
    if self.is_remote:
        # If file is remote but provider does not override the implementation
        # this is the best we can do.
        return self.mtime > time
    marker = os.path.join(self.file, ".snakemake_timestamp")
    if os.path.isdir(self.file) and os.path.exists(marker):
        # directory outputs carry their mtime in the marker file
        stat_target = marker
    else:
        stat_target = self.file
    try:
        return os.stat(stat_target).st_mtime > time or self.mtime > time
    except FileNotFoundError:
        raise WorkflowError(
            "File {} not found although it existed before. Is there another active process that might have deleted it?".format(
                self.file
            )
        )
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def touch(self, times=None):
    """times must be 2-tuple: (atime, mtime)"""
    # Directory outputs are timestamped via a flag file inside them, since
    # a directory's own mtime changes whenever its contents change.
    try:
        if not self.is_directory:
            lutime(self.file, times)
            return
        flag = os.path.join(self.file, TIMESTAMP_FILENAME)
        if not os.path.exists(flag):
            # Create the flag file if it doesn't exist
            with open(flag, "w"):
                pass
        lutime(flag, times)
    except OSError as e:
        if e.errno != 2:
            raise e
        # ENOENT: translate into the workflow-level error
        raise MissingOutputException(
            "Output file {} of rule {} shall be touched but does not exist.".format(
                self.file, self.rule.name
            ),
            lineno=self.rule.lineno,
            snakefile=self.rule.snakefile,
        )
def touch(self, times=None):
    """times must be 2-tuple: (atime, mtime)"""
    # Directory outputs are timestamped via a flag file inside them, since
    # a directory's own mtime changes whenever its contents change.
    try:
        if not self.is_directory:
            lutime(self.file, times)
            return
        flag = os.path.join(self.file, ".snakemake_timestamp")
        if not os.path.exists(flag):
            # Create the flag file if it doesn't exist
            with open(flag, "w"):
                pass
        lutime(flag, times)
    except OSError as e:
        if e.errno != 2:
            raise e
        # ENOENT: translate into the workflow-level error
        raise MissingOutputException(
            "Output file {} of rule {} shall be touched but does not exist.".format(
                self.file, self.rule.name
            ),
            lineno=self.rule.lineno,
            snakefile=self.rule.snakefile,
        )
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def touch_or_create(self):
    """Touch the output; if it does not exist yet, create it (and any
    missing parent directories) as an empty placeholder instead."""
    try:
        self.touch()
    except MissingOutputException:
        # the output is missing: decide which directory must exist and
        # which file acts as the placeholder
        if self.is_directory:
            parent = self.file
            target = os.path.join(self.file, TIMESTAMP_FILENAME)
        else:
            parent = os.path.dirname(self.file)
            target = self.file
        if parent:
            os.makedirs(parent, exist_ok=True)
        # create empty file
        with open(target, "w"):
            pass
def touch_or_create(self):
    """Touch the output; if it does not exist yet, create it (and any
    missing parent directories) as an empty placeholder instead."""
    try:
        self.touch()
    except MissingOutputException:
        # the output is missing: decide which directory must exist and
        # which file acts as the placeholder
        if self.is_directory:
            parent = self.file
            target = os.path.join(self.file, ".snakemake_timestamp")
        else:
            parent = os.path.dirname(self.file)
            target = self.file
        if parent:
            os.makedirs(parent, exist_ok=True)
        # create empty file
        with open(target, "w"):
            pass
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError
def wrapper(self, *args, **kwargs):
    """Answer the wrapped IO predicate from the workflow's cache.

    Falls through to the uncached call when caching is off; otherwise looks
    the file up in the per-predicate cache, fills missing entries on demand,
    and maps cached sentinel values back to the matching exceptions.
    """
    io_cache = self.rule.workflow.iocache
    if not io_cache.active or self.inventory_path is None:
        return func(self, *args, **kwargs)
    entry_cache = getattr(io_cache, func.__name__)
    # first check if file is present in cache
    if self.inventory_path in entry_cache:
        value = entry_cache[self.inventory_path]
    elif io_cache.in_inventory(self.inventory_root):
        # the containing folder was cached, so the file is known to be absent
        if raise_error:
            # make sure that the cache behaves the same as non-cached results
            raise FileNotFoundError(
                "No such file or directory: {}".format(self.file)
            )
        else:
            return False
    elif self._is_function:
        raise ValueError(
            "This IOFile is specified as a function and may not be used directly."
        )
    else:
        value = IOCACHE_DEFERRED
    if value is IOCACHE_DEFERRED:
        # determine values that are not yet cached
        self._add_to_inventory(func.__name__)
        value = entry_cache[self.inventory_path]
    # makes sure that cache behaves same as non-cached results
    if value is IOCACHE_BROKENSYMLINK:
        raise WorkflowError(
            "File {} seems to be a broken symlink.".format(self.file)
        )
    elif value is IOCACHE_NOTEXIST:
        raise FileNotFoundError("File {} does not exist.".format(self.file))
    elif value is IOCACHE_NOPERMISSION:
        raise PermissionError(
            "File {} does not have the required permissions.".format(self.file)
        )
    return value
def wrapper(self, *args, **kwargs):
    """Delegate to the remote object's implementation of the wrapped
    operation when one exists; otherwise run the local implementation."""
    name = func.__name__
    if self.is_remote and hasattr(self.remote_object, name):
        return getattr(self.remote_object, name)(*args, **kwargs)
    return func(self, *args, **kwargs)
https://github.com/snakemake/snakemake/issues/611
Building DAG of jobs... Traceback (most recent call last): File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake keepincomplete=keep_incomplete, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute dag.init() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init job = self.update(self.file2jobs(file), file=file, progress=progress) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update progress=progress, File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_ file.inventory() File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory self._local_inventory(cache) File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory with os.scandir(path) as scan: FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
FileNotFoundError