after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def parse_array(self, name: str, obj: JsonSchemaObject, path: List[str]) -> DataModel:
field, item_obj_names = self.parse_array_fields(name, obj, [*path, name])
self.model_resolver.add(path, name)
data_model_root = self.data_model_root_type(
name,
[field],
custom_base_class=self.base_class,
custom_template_dir=self.custom_template_dir,
extra_template_data=self.extra_template_data,
)
self.append_result(data_model_root)
return data_model_root
|
def parse_array(self, name: str, obj: JsonSchemaObject, path: List[str]) -> None:
field, item_obj_names = self.parse_array_fields(name, obj, [*path, name])
self.model_resolver.add(path, name)
data_model_root = self.data_model_root_type(
name,
[field],
custom_base_class=self.base_class,
custom_template_dir=self.custom_template_dir,
extra_template_data=self.extra_template_data,
)
self.append_result(data_model_root)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/216
|
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "pydantic\main.py", line 346, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 2 validation errors for FileSetUpload
tags -> tag1
value is not a valid dict (type=type_error.dict)
tags -> tag2
value is not a valid dict (type=type_error.dict)
|
pydantic.error_wrappers.ValidationError
|
def get_data_type(self, obj: JsonSchemaObject) -> List[DataType]:
if obj.type is None:
return [
self.data_type(type="Any", version_compatible=True, imports_=[IMPORT_ANY])
]
if isinstance(obj.type, list):
types: List[str] = [t for t in obj.type if t != "null"]
format_ = "default"
else:
types = [obj.type]
format_ = obj.format or "default"
return [
self.data_model_type.get_data_type(
json_schema_data_formats[t][format_],
**obj.dict() if not self.field_constraints else {},
)
for t in types
]
|
def get_data_type(self, obj: JsonSchemaObject) -> List[DataType]:
if obj.type is None:
raise ValueError(f"invalid schema object {obj}")
if isinstance(obj.type, list):
types: List[str] = [t for t in obj.type if t != "null"]
format_ = "default"
else:
types = [obj.type]
format_ = obj.format or "default"
return [
self.data_model_type.get_data_type(
json_schema_data_formats[t][format_],
**obj.dict() if not self.field_constraints else {},
)
for t in types
]
|
https://github.com/koxudaxi/datamodel-code-generator/issues/205
|
Traceback (most recent call last):
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/__main__.py", line 230, in main
aliases=aliases,
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/__init__.py", line 180, in generate
result = parser.parse()
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/parser/base.py", line 339, in parse
self.parse_raw()
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/parser/openapi.py", line 25, in parse_raw
self.parse_raw_obj(obj_name, raw_obj, ['#/components', 'schemas', obj_name])
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/parser/jsonschema.py", line 628, in parse_raw_obj
self.parse_object(name, obj, path)
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/parser/jsonschema.py", line 391, in parse_object
fields = self.parse_object_fields(obj, path)
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/parser/jsonschema.py", line 359, in parse_object_fields
field_types = self.get_data_type(field)
File "/home/docker/venv3.6/lib/python3.6/site-packages/datamodel_code_generator/parser/jsonschema.py", line 169, in get_data_type
raise ValueError(f'invalid schema object {obj}')
ValueError: invalid schema object items=None uniqueItem=None type=None format=None pattern=None minLength=None maxLength=None minimum=None maximum=None multipleOf=None exclusiveMaximum=None exclusiveMinimum=None additionalProperties=None oneOf=[] anyOf=[] allOf=[] enum=[] writeOnly=None properties=None required=[] ref=None nullable=False x_enum_varnames=[] description=None title='Bar' example=None examples=None default=None
|
ValueError
|
def get_data_type(self, obj: JsonSchemaObject) -> List[DataType]:
if obj.type is None:
raise ValueError(f"invalid schema object {obj}")
if isinstance(obj.type, list):
types: List[str] = [t for t in obj.type if t != "null"]
format_ = "default"
else:
types = [obj.type]
format_ = obj.format or "default"
return [
self.data_model_type.get_data_type(
json_schema_data_formats[t][format_], **obj.dict()
)
for t in types
]
|
def get_data_type(self, obj: JsonSchemaObject) -> DataType:
format_ = obj.format or "default"
if obj.type is None:
raise ValueError(f"invalid schema object {obj}")
return self.data_model_type.get_data_type(
json_schema_data_formats[obj.type][format_], **obj.dict()
)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_any_of(self, name: str, obj: JsonSchemaObject) -> List[DataType]:
any_of_data_types: List[DataType] = []
for any_of_item in obj.anyOf:
if any_of_item.ref: # $ref
any_of_data_types.append(
self.data_type(
type=any_of_item.ref_object_name,
ref=True,
version_compatible=True,
)
)
elif not any(v for k, v in vars(any_of_item).items() if k != "type"):
# trivial types
any_of_data_types.extend(self.get_data_type(any_of_item))
elif (
any_of_item.is_array
and isinstance(any_of_item.items, JsonSchemaObject)
and not any(v for k, v in vars(any_of_item.items).items() if k != "type")
):
# trivial item types
any_of_data_types.append(
self.data_type(
type=f"List[{', '.join([t.type_hint for t in self.get_data_type(any_of_item.items)])}]",
imports_=[Import(from_="typing", import_="List")],
)
)
else:
singular_name = get_singular_name(name)
self.parse_object(singular_name, any_of_item)
any_of_data_types.append(
self.data_type(type=singular_name, ref=True, version_compatible=True)
)
return any_of_data_types
|
def parse_any_of(self, name: str, obj: JsonSchemaObject) -> List[DataType]:
any_of_data_types: List[DataType] = []
for any_of_item in obj.anyOf:
if any_of_item.ref: # $ref
any_of_data_types.append(
self.data_type(
type=any_of_item.ref_object_name,
ref=True,
version_compatible=True,
)
)
elif not any(v for k, v in vars(any_of_item).items() if k != "type"):
# trivial types
any_of_data_types.append(self.get_data_type(any_of_item))
elif (
any_of_item.is_array
and isinstance(any_of_item.items, JsonSchemaObject)
and not any(v for k, v in vars(any_of_item.items).items() if k != "type")
):
# trivial item types
any_of_data_types.append(
self.data_type(
type=f"List[{self.get_data_type(any_of_item.items).type_hint}]",
imports_=[Import(from_="typing", import_="List")],
)
)
else:
singular_name = get_singular_name(name)
self.parse_object(singular_name, any_of_item)
any_of_data_types.append(
self.data_type(type=singular_name, ref=True, version_compatible=True)
)
return any_of_data_types
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_object_fields(self, obj: JsonSchemaObject) -> List[DataModelField]:
properties: Dict[str, JsonSchemaObject] = (
obj.properties if obj.properties is not None else {}
)
requires: Set[str] = {*obj.required} if obj.required is not None else {*()}
fields: List[DataModelField] = []
for field_name, field in properties.items(): # type: ignore
is_list = False
field_types: List[DataType]
if field.ref:
field_types = [
self.data_type(
type=field.ref_object_name, ref=True, version_compatible=True
)
]
elif field.is_array:
class_name = self.get_class_name(field_name)
array_fields, array_field_classes = self.parse_array_fields(
class_name, field
)
field_types = array_fields[0].data_types
is_list = True
elif field.anyOf:
field_types = self.parse_any_of(field_name, field)
elif field.allOf:
field_types = self.parse_all_of(field_name, field)
elif field.is_object:
if field.properties:
class_name = self.get_class_name(field_name)
self.parse_object(class_name, field)
field_types = [
self.data_type(type=class_name, ref=True, version_compatible=True)
]
else:
field_types = [
self.data_type(
type="Dict[str, Any]",
imports_=[
Import(from_="typing", import_="Any"),
Import(from_="typing", import_="Dict"),
],
)
]
elif field.enum:
enum = self.parse_enum(field_name, field)
field_types = [
self.data_type(type=enum.name, ref=True, version_compatible=True)
]
else:
field_types = self.get_data_type(field)
required: bool = field_name in requires
fields.append(
self.data_model_field_type(
name=field_name,
example=field.examples,
description=field.description,
default=field.default,
title=field.title,
data_types=field_types,
required=required,
is_list=is_list,
)
)
return fields
|
def parse_object_fields(self, obj: JsonSchemaObject) -> List[DataModelField]:
properties: Dict[str, JsonSchemaObject] = (
obj.properties if obj.properties is not None else {}
)
requires: Set[str] = {*obj.required} if obj.required is not None else {*()}
fields: List[DataModelField] = []
for field_name, field in properties.items(): # type: ignore
is_list = False
field_types: List[DataType]
if field.ref:
field_types = [
self.data_type(
type=field.ref_object_name, ref=True, version_compatible=True
)
]
elif field.is_array:
class_name = self.get_class_name(field_name)
array_fields, array_field_classes = self.parse_array_fields(
class_name, field
)
field_types = array_fields[0].data_types
is_list = True
elif field.anyOf:
field_types = self.parse_any_of(field_name, field)
elif field.allOf:
field_types = self.parse_all_of(field_name, field)
elif field.is_object:
if field.properties:
class_name = self.get_class_name(field_name)
self.parse_object(class_name, field)
field_types = [
self.data_type(type=class_name, ref=True, version_compatible=True)
]
else:
field_types = [
self.data_type(
type="Dict[str, Any]",
imports_=[
Import(from_="typing", import_="Any"),
Import(from_="typing", import_="Dict"),
],
)
]
elif field.enum:
enum = self.parse_enum(field_name, field)
field_types = [
self.data_type(type=enum.name, ref=True, version_compatible=True)
]
else:
data_type = self.get_data_type(field)
field_types = [data_type]
required: bool = field_name in requires
fields.append(
self.data_model_field_type(
name=field_name,
example=field.examples,
description=field.description,
default=field.default,
title=field.title,
data_types=field_types,
required=required,
is_list=is_list,
)
)
return fields
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_array_fields(
self, name: str, obj: JsonSchemaObject
) -> Tuple[List[DataModelField], List[DataType]]:
if isinstance(obj.items, JsonSchemaObject):
items: List[JsonSchemaObject] = [obj.items]
else:
items = obj.items # type: ignore
item_obj_data_types: List[DataType] = []
is_union: bool = False
for item in items:
if item.ref:
item_obj_data_types.append(
self.data_type(
type=item.ref_object_name, ref=True, version_compatible=True
)
)
elif isinstance(item, JsonSchemaObject) and item.properties:
singular_name = get_singular_name(name)
self.parse_object(singular_name, item)
item_obj_data_types.append(
self.data_type(type=singular_name, ref=True, version_compatible=True)
)
elif item.anyOf:
item_obj_data_types.extend(self.parse_any_of(name, item))
is_union = True
elif item.allOf:
singular_name = get_singular_name(name)
item_obj_data_types.extend(self.parse_all_of(singular_name, item))
else:
item_obj_data_types.extend(self.get_data_type(item))
field = self.data_model_field_type(
data_types=item_obj_data_types,
example=obj.examples,
default=obj.default,
description=obj.default,
title=obj.title,
required=True,
is_list=True,
is_union=is_union,
)
return [field], item_obj_data_types
|
def parse_array_fields(
self, name: str, obj: JsonSchemaObject
) -> Tuple[List[DataModelField], List[DataType]]:
if isinstance(obj.items, JsonSchemaObject):
items: List[JsonSchemaObject] = [obj.items]
else:
items = obj.items # type: ignore
item_obj_data_types: List[DataType] = []
is_union: bool = False
for item in items:
if item.ref:
item_obj_data_types.append(
self.data_type(
type=item.ref_object_name, ref=True, version_compatible=True
)
)
elif isinstance(item, JsonSchemaObject) and item.properties:
singular_name = get_singular_name(name)
self.parse_object(singular_name, item)
item_obj_data_types.append(
self.data_type(type=singular_name, ref=True, version_compatible=True)
)
elif item.anyOf:
item_obj_data_types.extend(self.parse_any_of(name, item))
is_union = True
elif item.allOf:
singular_name = get_singular_name(name)
item_obj_data_types.extend(self.parse_all_of(singular_name, item))
else:
item_obj_data_types.append(self.get_data_type(item))
field = self.data_model_field_type(
data_types=item_obj_data_types,
example=obj.examples,
default=obj.default,
description=obj.default,
title=obj.title,
required=True,
is_list=True,
is_union=is_union,
)
return [field], item_obj_data_types
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_root_type(self, name: str, obj: JsonSchemaObject) -> None:
if obj.type:
types: List[DataType] = self.get_data_type(obj)
elif obj.anyOf:
types = self.parse_any_of(name, obj)
else:
types = [
self.data_type(type=obj.ref_object_name, ref=True, version_compatible=True)
]
data_model_root_type = self.data_model_root_type(
name,
[
self.data_model_field_type(
data_types=types,
description=obj.description,
example=obj.examples,
default=obj.default,
required=not obj.nullable,
)
],
custom_base_class=self.base_class,
custom_template_dir=self.custom_template_dir,
extra_template_data=self.extra_template_data,
)
self.append_result(data_model_root_type)
|
def parse_root_type(self, name: str, obj: JsonSchemaObject) -> None:
if obj.type:
types: List[DataType] = [self.get_data_type(obj)]
elif obj.anyOf:
types = self.parse_any_of(name, obj)
else:
types = [
self.data_type(type=obj.ref_object_name, ref=True, version_compatible=True)
]
data_model_root_type = self.data_model_root_type(
name,
[
self.data_model_field_type(
data_types=types,
description=obj.description,
example=obj.examples,
default=obj.default,
required=not obj.nullable,
)
],
custom_base_class=self.base_class,
custom_template_dir=self.custom_template_dir,
extra_template_data=self.extra_template_data,
)
self.append_result(data_model_root_type)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def create_line(cls, from_: Optional[str], imports: Set[str]) -> str:
if from_:
line = f"from {from_} "
line += f"import {', '.join(sorted(imports))}"
return line
return "\n".join(f"import {i}\n" for i in sorted(imports))
|
def create_line(cls, from_: Optional[str], imports: Set[str]) -> str:
line: str = ""
if from_: # pragma: no cover
line = f"from {from_} "
line += f"import {', '.join(sorted(imports))}"
return line
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def append(self, imports: Union[Import, List[Import], None]) -> None:
if imports:
if isinstance(imports, Import):
imports = [imports]
for import_ in imports:
if import_.import_.count(".") >= 1:
self[None].add(import_.import_)
else:
self[import_.from_].add(import_.import_)
|
def append(self, imports: Union[Import, List[Import], None]) -> None:
if imports:
if isinstance(imports, Import):
imports = [imports]
for import_ in imports:
self[import_.from_].add(import_.import_)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def sort_data_models(
unsorted_data_models: List[DataModel],
sorted_data_models: Optional[SortedDataModels] = None,
require_update_action_models: Optional[List[str]] = None,
) -> Tuple[List[DataModel], SortedDataModels, List[str]]:
if sorted_data_models is None:
sorted_data_models = OrderedDict()
if require_update_action_models is None:
require_update_action_models = []
unresolved_references: List[DataModel] = []
for model in unsorted_data_models:
if not model.reference_classes:
sorted_data_models[model.name] = model
elif (
model.name in model.reference_classes and len(model.reference_classes) == 1
): # only self-referencing
sorted_data_models[model.name] = model
require_update_action_models.append(model.name)
elif (
not set(model.reference_classes) - {model.name} - set(sorted_data_models)
): # reference classes have been resolved
sorted_data_models[model.name] = model
if model.name in model.reference_classes:
require_update_action_models.append(model.name)
else:
unresolved_references.append(model)
if unresolved_references:
try:
return sort_data_models(
unresolved_references, sorted_data_models, require_update_action_models
)
except RecursionError:
unresolved_classes = ", ".join(
f"[class: {item.name} references: {item.reference_classes}]"
for item in unresolved_references
)
raise Exception(f"A Parser can not resolve classes: {unresolved_classes}.")
return unresolved_references, sorted_data_models, require_update_action_models
|
def sort_data_models(
unsorted_data_models: List[DataModel],
sorted_data_models: Optional[SortedDataModels] = None,
require_update_action_models: Optional[List[str]] = None,
) -> Tuple[List[DataModel], SortedDataModels, List[str]]:
if sorted_data_models is None:
sorted_data_models = OrderedDict()
if require_update_action_models is None:
require_update_action_models = []
unresolved_references: List[DataModel] = []
for model in unsorted_data_models:
if not model.reference_classes:
sorted_data_models[model.name] = model
elif (
model.name in model.reference_classes and len(model.reference_classes) == 1
): # only self-referencing
sorted_data_models[model.name] = model
require_update_action_models.append(model.name)
elif (
not set(model.reference_classes) - set(model.name) - set(sorted_data_models)
): # reference classes have been resolved
sorted_data_models[model.name] = model
if model.name in model.reference_classes:
require_update_action_models.append(model.name)
else:
unresolved_references.append(model)
if unresolved_references:
try:
return sort_data_models(
unresolved_references, sorted_data_models, require_update_action_models
)
except RecursionError:
unresolved_classes = ", ".join(
f"[class: {item.name} references: {item.reference_classes}]"
for item in unresolved_references
)
raise Exception(f"A Parser can not resolve classes: {unresolved_classes}.")
return unresolved_references, sorted_data_models, require_update_action_models
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def get_model_by_path(schema: Dict[str, Any], keys: List[str]) -> Dict:
if len(keys) == 0:
return schema
elif len(keys) == 1:
return schema[keys[0]]
return get_model_by_path(schema[keys[0]], keys[1:])
|
def get_model_by_path(schema: Dict[str, Any], keys: List[str]) -> Dict:
if len(keys) == 1:
return schema[keys[0]]
return get_model_by_path(schema[keys[0]], keys[1:])
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_ref(self, obj: JsonSchemaObject) -> None:
if obj.ref:
ref: str = obj.ref
# https://swagger.io/docs/specification/using-ref/
if obj.ref.startswith("#"):
# Local Reference – $ref: '#/definitions/myElement'
pass
elif "://" in ref:
# URL Reference – $ref: 'http://path/to/your/resource' Uses the whole document located on the different server.
raise NotImplementedError(f"URL Reference is not supported. $ref:{ref}")
else:
# Remote Reference – $ref: 'document.json' Uses the whole document located on the same server and in the same location.
# TODO treat edge case
relative_path, object_path = ref.split("#/")
full_path = self.base_path / relative_path
with full_path.open() as f:
if full_path.suffix.lower() == ".json":
import json
ref_body: Dict[str, Any] = json.load(f)
else:
# expect yaml
import yaml
ref_body = yaml.safe_load(f)
object_parents = object_path.split("/")
ref_path = str(full_path) + "#/" + "/".join(object_parents[:-1])
if ref_path not in self.excludes_ref_path:
self.excludes_ref_path.add(ref_path)
models = get_model_by_path(ref_body, object_parents[:-1])
for model_name, model in models.items():
self.parse_raw_obj(model_name, model)
if obj.items:
if isinstance(obj.items, JsonSchemaObject):
self.parse_ref(obj.items)
else:
for item in obj.items:
self.parse_ref(item)
if isinstance(obj.additionalProperties, JsonSchemaObject):
self.parse_ref(obj.additionalProperties)
for item in obj.anyOf:
self.parse_ref(item)
for item in obj.allOf:
self.parse_ref(item)
if obj.properties:
for value in obj.properties.values():
self.parse_ref(value)
|
def parse_ref(self, obj: JsonSchemaObject) -> None:
if obj.ref:
ref: str = obj.ref
# https://swagger.io/docs/specification/using-ref/
if obj.ref.startswith("#"):
# Local Reference – $ref: '#/definitions/myElement'
pass
elif "://" in ref:
# URL Reference – $ref: 'http://path/to/your/resource' Uses the whole document located on the different server.
raise NotImplementedError(f"URL Reference is not supported. $ref:{ref}")
else:
# Remote Reference – $ref: 'document.json' Uses the whole document located on the same server and in the same location.
# TODO treat edge case
relative_path, object_path = ref.split("#/")
full_path = self.base_path / relative_path
with full_path.open() as f:
if full_path.suffix.lower() == ".json":
import json
ref_body: Dict[str, Any] = json.load(f)
else:
# expect yaml
import yaml
ref_body = yaml.safe_load(f)
object_parents = object_path.split("/")
ref_path = str(full_path) + "#/" + object_path
if ref_path not in self.excludes_ref_path:
self.excludes_ref_path.add(str(full_path) + "#/" + object_path)
model = get_model_by_path(ref_body, object_parents)
self.parse_raw_obj(object_parents[-1], model)
if obj.items:
if isinstance(obj.items, JsonSchemaObject):
self.parse_ref(obj.items)
else:
for item in obj.items:
self.parse_ref(item)
if isinstance(obj.additionalProperties, JsonSchemaObject):
self.parse_ref(obj.additionalProperties)
for item in obj.anyOf:
self.parse_ref(item)
for item in obj.allOf:
self.parse_ref(item)
if obj.properties:
for value in obj.properties.values():
self.parse_ref(value)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_raw(self) -> None:
raw_obj: Dict[str, Any] = json.loads(self.text) # type: ignore
obj_name = raw_obj.get("title", "Model")
self.parse_raw_obj(obj_name, raw_obj)
definitions = raw_obj.get("definitions", {})
for key, model in definitions.items():
self.parse_raw_obj(key, model)
|
def parse_raw(self) -> None:
raw_obj: Dict[str, Any] = json.loads(self.text) # type: ignore
obj_name = raw_obj.get("title", "Model")
self.parse_raw_obj(obj_name, raw_obj)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_root_type(self, name: str, obj: JsonSchemaObject) -> None:
if obj.type:
types: List[DataType] = self.get_data_type(obj)
elif obj.anyOf:
types = self.parse_any_of(name, obj)
elif obj.ref:
types = [
self.data_type(type=obj.ref_object_name, ref=True, version_compatible=True)
]
else:
types = [
self.data_type(type="Any", version_compatible=True, imports_=[IMPORT_ANY])
]
data_model_root_type = self.data_model_root_type(
name,
[
self.data_model_field_type(
data_types=types,
description=obj.description,
example=obj.examples,
default=obj.default,
required=not obj.nullable,
)
],
custom_base_class=self.base_class,
custom_template_dir=self.custom_template_dir,
extra_template_data=self.extra_template_data,
)
self.append_result(data_model_root_type)
|
def parse_root_type(self, name: str, obj: JsonSchemaObject) -> None:
if obj.type:
types: List[DataType] = self.get_data_type(obj)
elif obj.anyOf:
types = self.parse_any_of(name, obj)
else:
types = [
self.data_type(type=obj.ref_object_name, ref=True, version_compatible=True)
]
data_model_root_type = self.data_model_root_type(
name,
[
self.data_model_field_type(
data_types=types,
description=obj.description,
example=obj.examples,
default=obj.default,
required=not obj.nullable,
)
],
custom_base_class=self.base_class,
custom_template_dir=self.custom_template_dir,
extra_template_data=self.extra_template_data,
)
self.append_result(data_model_root_type)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def main(args: Optional[Sequence[str]] = None) -> Exit:
"""Main function."""
# add cli completion support
argcomplete.autocomplete(arg_parser)
if args is None:
args = sys.argv[1:]
namespace: Namespace = arg_parser.parse_args(args)
if namespace.version: # pragma: no cover
from datamodel_code_generator.version import version
print(version)
exit(0)
if namespace.debug: # pragma: no cover
enable_debug_message()
extra_template_data: Optional[DefaultDict[str, Dict]]
if namespace.extra_template_data is not None:
with namespace.extra_template_data as data:
extra_template_data = json.load(
data, object_hook=lambda d: defaultdict(dict, **d)
)
else:
extra_template_data = None
text: str = namespace.input.read()
input_file_type: str = namespace.input_file_type
if input_file_type == "auto":
try:
input_file_type = "openapi" if is_openapi(text) else "jsonschema"
except:
print("Invalid file format")
return Exit.ERROR
if input_file_type == "openapi":
from datamodel_code_generator.parser.openapi import OpenAPIParser
parser_class: Type[Parser] = OpenAPIParser
else:
from datamodel_code_generator.parser.jsonschema import JsonSchemaParser
parser_class = JsonSchemaParser
parser = parser_class(
BaseModel,
CustomRootType,
DataModelField,
base_class=namespace.base_class,
custom_template_dir=namespace.custom_template_dir,
extra_template_data=extra_template_data,
target_python_version=PythonVersion(namespace.target_python_version),
text=text,
dump_resolve_reference_action=dump_resolve_reference_action,
validation=namespace.validation,
)
output = Path(namespace.output) if namespace.output is not None else None
with chdir(output):
result = parser.parse()
if isinstance(result, str):
modules = {output: result}
else:
if output is None:
print("Modular references require an output directory")
return Exit.ERROR
if output.suffix:
print("Modular references require an output directory, not a file")
return Exit.ERROR
modules = {
output.joinpath(*name): body for name, body in sorted(result.items())
}
timestamp = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
header = f"""\
# generated by datamodel-codegen:
# filename: {Path(namespace.input.name).name}
# timestamp: {timestamp}"""
file: Optional[IO[Any]]
for path, body in modules.items():
if path is not None:
if not path.parent.exists():
path.parent.mkdir(parents=True)
file = path.open("wt")
else:
file = None
print(header, file=file)
if body:
print("", file=file)
print(body.rstrip(), file=file)
if file is not None:
file.close()
return Exit.OK
|
def main(args: Optional[Sequence[str]] = None) -> Exit:
"""Main function."""
# add cli completion support
argcomplete.autocomplete(arg_parser)
if args is None:
args = sys.argv[1:]
namespace: Namespace = arg_parser.parse_args(args)
if namespace.version: # pragma: no cover
from datamodel_code_generator.version import version
print(version)
exit(0)
if namespace.debug: # pragma: no cover
enable_debug_message()
extra_template_data: Optional[DefaultDict[str, Dict]]
if namespace.extra_template_data is not None:
with namespace.extra_template_data as data:
extra_template_data = json.load(
data, object_hook=lambda d: defaultdict(dict, **d)
)
else:
extra_template_data = None
text: str = namespace.input.read()
input_file_type: str = namespace.input_file_type
if input_file_type == "auto":
try:
input_file_type = "openapi" if is_openapi(text) else "jsonschema"
except:
print("Invalid file format")
return Exit.ERROR
if input_file_type == "openapi":
from datamodel_code_generator.parser.openapi import OpenAPIParser
parser_class: Type[Parser] = OpenAPIParser
else:
from datamodel_code_generator.parser.jsonschema import JsonSchemaParser
parser_class = JsonSchemaParser
parser = parser_class(
BaseModel,
CustomRootType,
DataModelField,
base_class=namespace.base_class,
custom_template_dir=namespace.custom_template_dir,
extra_template_data=extra_template_data,
target_python_version=PythonVersion(namespace.target_python_version),
text=text,
dump_resolve_reference_action=dump_resolve_reference_action,
validation=namespace.validation,
)
output = Path(namespace.output) if namespace.output is not None else None
with chdir(output):
result = parser.parse()
if isinstance(result, str):
modules = {output: result}
else:
if output is None:
print("Modular references require an output directory")
return Exit.ERROR
if output.suffix:
print("Modular references require an output directory, not a file")
return Exit.ERROR
modules = {
output.joinpath(*name): body for name, body in sorted(result.items())
}
timestamp = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
header = f"""\
# generated by datamodel-codegen:
# filename: {Path(namespace.input.name).name}
# timestamp: {timestamp}"""
file: Optional[IO[Any]]
for path, body in modules.items():
if path is not None:
if not path.parent.exists():
path.parent.mkdir()
file = path.open("wt")
else:
file = None
print(header, file=file)
if body:
print("", file=file)
print(body.rstrip(), file=file)
if file is not None:
file.close()
return Exit.OK
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def __init__(self) -> None:
super().__init__(set)
self.alias: DefaultDict[Optional[str], Dict[str, str]] = defaultdict(dict)
|
def __init__(self) -> None:
super().__init__(set)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def create_line(self, from_: Optional[str], imports: Set[str]) -> str:
if from_:
line = f"from {from_} "
line += f"import {', '.join(self._set_alias(from_, imports))}"
return line
return "\n".join(f"import {i}" for i in self._set_alias(from_, imports))
|
def create_line(cls, from_: Optional[str], imports: Set[str]) -> str:
if from_:
line = f"from {from_} "
line += f"import {', '.join(sorted(imports))}"
return line
return "\n".join(f"import {i}" for i in sorted(imports))
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def append(self, imports: Union[Import, List[Import], None]) -> None:
if imports:
if isinstance(imports, Import):
imports = [imports]
for import_ in imports:
if import_.import_.count(".") >= 1:
self[None].add(import_.import_)
else:
self[import_.from_].add(import_.import_)
if import_.alias:
self.alias[import_.from_][import_.import_] = import_.alias
|
def append(self, imports: Union[Import, List[Import], None]) -> None:
if imports:
if isinstance(imports, Import):
imports = [imports]
for import_ in imports:
if import_.import_.count(".") >= 1:
self[None].add(import_.import_)
else:
self[import_.from_].add(import_.import_)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def get_uniq_name(name: str, excludes: Set[str]) -> str:
uniq_name: str = name
count: int = 1
while uniq_name in excludes:
uniq_name = f"{name}_{count}"
count += 1
return uniq_name
|
def get_uniq_name(self, name: str) -> str:
uniq_name: str = name
count: int = 1
while uniq_name in self.created_model_names:
uniq_name = f"{name}_{count}"
count += 1
return uniq_name
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def get_class_name(self, field_name: str) -> str:
upper_camel_name = snake_to_upper_camel(field_name)
return get_uniq_name(upper_camel_name, self.created_model_names)
|
def get_class_name(self, field_name: str) -> str:
upper_camel_name = snake_to_upper_camel(field_name)
return self.get_uniq_name(upper_camel_name)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse(
self, with_import: Optional[bool] = True, format_: Optional[bool] = True
) -> Union[str, Dict[Tuple[str, ...], str]]:
self.parse_raw()
if with_import:
if self.target_python_version == PythonVersion.PY_37:
self.imports.append(IMPORT_ANNOTATIONS)
_, sorted_data_models, require_update_action_models = sort_data_models(self.results)
results: Dict[Tuple[str, ...], str] = {}
module_key = lambda x: (*x.name.split(".")[:-1],)
# process in reverse order to correctly establish module levels
grouped_models = groupby(
sorted(sorted_data_models.values(), key=module_key, reverse=True),
key=module_key,
)
for module, models in ((k, [*v]) for k, v in grouped_models):
module_path = ".".join(module)
init = False
if module:
parent = (*module[:-1], "__init__.py")
if parent not in results:
results[parent] = ""
if (*module, "__init__.py") in results:
module = (*module, "__init__.py")
init = True
else:
module = (*module[:-1], f"{module[-1]}.py")
else:
module = ("__init__.py",)
result: List[str] = []
imports = Imports()
models_to_update: List[str] = []
for model in models:
used_import_names: Set[str] = set()
alias_map: Dict[str, str] = {}
if model.name in require_update_action_models:
models_to_update += [model.name]
imports.append(model.imports)
for field in model.fields:
type_hint = field.type_hint
if type_hint is None: # pragma: no cover
continue
for data_type in field.data_types:
if "." not in data_type.type:
continue
from_, import_ = relative(module_path, data_type.type)
alias = get_uniq_name(import_, used_import_names)
used_import_names.add(import_)
if alias != import_:
alias_map[f"{from_}/{import_}"] = alias
name = data_type.type.rsplit(".", 1)[-1]
pattern = re.compile(rf"\b{re.escape(data_type.type)}\b")
if from_ and import_:
type_hint = pattern.sub(rf"{alias}.{name}", type_hint)
else:
type_hint = pattern.sub(name, type_hint)
field.type_hint = type_hint
for ref_name in model.reference_classes:
from_, import_ = relative(module_path, ref_name)
if init:
from_ += "."
if from_ and import_:
imports.append(
Import(
from_=from_,
import_=import_,
alias=alias_map.get(f"{from_}/{import_}"),
)
)
if with_import:
result += [imports.dump(), self.imports.dump(), "\n"]
code = dump_templates(models)
result += [code]
if self.dump_resolve_reference_action is not None:
result += ["\n", self.dump_resolve_reference_action(models_to_update)]
body = "\n".join(result)
if format_:
body = format_code(body, self.target_python_version)
results[module] = body
# retain existing behaviour
if [*results] == [("__init__.py",)]:
return results[("__init__.py",)]
return results
|
def parse(
self, with_import: Optional[bool] = True, format_: Optional[bool] = True
) -> Union[str, Dict[Tuple[str, ...], str]]:
self.parse_raw()
if with_import:
if self.target_python_version == PythonVersion.PY_37:
self.imports.append(IMPORT_ANNOTATIONS)
_, sorted_data_models, require_update_action_models = sort_data_models(self.results)
results: Dict[Tuple[str, ...], str] = {}
module_key = lambda x: (*x.name.split(".")[:-1],)
# process in reverse order to correctly establish module levels
grouped_models = groupby(
sorted(sorted_data_models.values(), key=module_key, reverse=True),
key=module_key,
)
for module, models in ((k, [*v]) for k, v in grouped_models):
module_path = ".".join(module)
init = False
if module:
parent = (*module[:-1], "__init__.py")
if parent not in results:
results[parent] = ""
if (*module, "__init__.py") in results:
module = (*module, "__init__.py")
init = True
else:
module = (*module[:-1], f"{module[-1]}.py")
else:
module = ("__init__.py",)
result: List[str] = []
imports = Imports()
models_to_update: List[str] = []
for model in models:
if model.name in require_update_action_models:
models_to_update += [model.name]
imports.append(model.imports)
for field in model.fields:
type_hint = field.type_hint
if type_hint is None: # pragma: no cover
continue
for data_type in field.data_types:
if "." not in data_type.type:
continue
from_, import_ = relative(module_path, data_type.type)
name = data_type.type.rsplit(".", 1)[-1]
pattern = re.compile(rf"\b{re.escape(data_type.type)}\b")
if from_ and import_:
type_hint = pattern.sub(rf"{import_}.{name}", type_hint)
else:
type_hint = pattern.sub(name, type_hint)
field.type_hint = type_hint
for ref_name in model.reference_classes:
from_, import_ = relative(module_path, ref_name)
if init:
from_ += "."
if from_ and import_:
imports.append(Import(from_=from_, import_=import_))
if with_import:
result += [imports.dump(), self.imports.dump(), "\n"]
code = dump_templates(models)
result += [code]
if self.dump_resolve_reference_action is not None:
result += ["\n", self.dump_resolve_reference_action(models_to_update)]
body = "\n".join(result)
if format_:
body = format_code(body, self.target_python_version)
results[module] = body
# retain existing behaviour
if [*results] == [("__init__.py",)]:
return results[("__init__.py",)]
return results
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def get_uniq_name(name: str, excludes: Set[str], camel: bool = False) -> str:
uniq_name: str = name
count: int = 1
while uniq_name in excludes:
if camel:
uniq_name = f"{name}{count}"
else:
uniq_name = f"{name}_{count}"
count += 1
return uniq_name
|
def get_uniq_name(name: str, excludes: Set[str]) -> str:
uniq_name: str = name
count: int = 1
while uniq_name in excludes:
uniq_name = f"{name}_{count}"
count += 1
return uniq_name
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def get_class_name(self, field_name: str) -> str:
upper_camel_name = snake_to_upper_camel(field_name)
return get_uniq_name(upper_camel_name, self.created_model_names, camel=True)
|
def get_class_name(self, field_name: str) -> str:
upper_camel_name = snake_to_upper_camel(field_name)
return get_uniq_name(upper_camel_name, self.created_model_names)
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse(
self, with_import: Optional[bool] = True, format_: Optional[bool] = True
) -> Union[str, Dict[Tuple[str, ...], str]]:
self.parse_raw()
if with_import:
if self.target_python_version == PythonVersion.PY_37:
self.imports.append(IMPORT_ANNOTATIONS)
_, sorted_data_models, require_update_action_models = sort_data_models(self.results)
results: Dict[Tuple[str, ...], str] = {}
module_key = lambda x: (*x.name.split(".")[:-1],)
# process in reverse order to correctly establish module levels
grouped_models = groupby(
sorted(sorted_data_models.values(), key=module_key, reverse=True),
key=module_key,
)
for module, models in ((k, [*v]) for k, v in grouped_models):
module_path = ".".join(module)
init = False
if module:
parent = (*module[:-1], "__init__.py")
if parent not in results:
results[parent] = ""
if (*module, "__init__.py") in results:
module = (*module, "__init__.py")
init = True
else:
module = (*module[:-1], f"{module[-1]}.py")
else:
module = ("__init__.py",)
result: List[str] = []
imports = Imports()
models_to_update: List[str] = []
for model in models:
used_import_names: Set[str] = set()
alias_map: Dict[str, Optional[str]] = {}
if model.name in require_update_action_models:
models_to_update += [model.name]
imports.append(model.imports)
for field in model.fields:
type_hint = field.type_hint
if type_hint is None: # pragma: no cover
continue
for data_type in field.data_types:
if "." not in data_type.type:
continue
from_, import_ = relative(module_path, data_type.type)
full_path = f"{from_}/{import_}"
if full_path in alias_map:
alias = alias_map[full_path] or import_
else:
alias = get_uniq_name(import_, used_import_names)
used_import_names.add(import_)
alias_map[full_path] = None if alias == import_ else alias
name = data_type.type.rsplit(".", 1)[-1]
pattern = re.compile(rf"\b{re.escape(data_type.type)}\b")
if from_ and import_:
type_hint = pattern.sub(rf"{alias}.{name}", type_hint)
else:
type_hint = pattern.sub(name, type_hint)
field.type_hint = type_hint
for ref_name in model.reference_classes:
from_, import_ = relative(module_path, ref_name)
if init:
from_ += "."
if from_ and import_:
imports.append(
Import(
from_=from_,
import_=import_,
alias=alias_map.get(f"{from_}/{import_}"),
)
)
if with_import:
result += [imports.dump(), self.imports.dump(), "\n"]
code = dump_templates(models)
result += [code]
if self.dump_resolve_reference_action is not None:
result += ["\n", self.dump_resolve_reference_action(models_to_update)]
body = "\n".join(result)
if format_:
body = format_code(body, self.target_python_version)
results[module] = body
# retain existing behaviour
if [*results] == [("__init__.py",)]:
return results[("__init__.py",)]
return results
|
def parse(
self, with_import: Optional[bool] = True, format_: Optional[bool] = True
) -> Union[str, Dict[Tuple[str, ...], str]]:
self.parse_raw()
if with_import:
if self.target_python_version == PythonVersion.PY_37:
self.imports.append(IMPORT_ANNOTATIONS)
_, sorted_data_models, require_update_action_models = sort_data_models(self.results)
results: Dict[Tuple[str, ...], str] = {}
module_key = lambda x: (*x.name.split(".")[:-1],)
# process in reverse order to correctly establish module levels
grouped_models = groupby(
sorted(sorted_data_models.values(), key=module_key, reverse=True),
key=module_key,
)
for module, models in ((k, [*v]) for k, v in grouped_models):
module_path = ".".join(module)
init = False
if module:
parent = (*module[:-1], "__init__.py")
if parent not in results:
results[parent] = ""
if (*module, "__init__.py") in results:
module = (*module, "__init__.py")
init = True
else:
module = (*module[:-1], f"{module[-1]}.py")
else:
module = ("__init__.py",)
result: List[str] = []
imports = Imports()
models_to_update: List[str] = []
for model in models:
used_import_names: Set[str] = set()
alias_map: Dict[str, str] = {}
if model.name in require_update_action_models:
models_to_update += [model.name]
imports.append(model.imports)
for field in model.fields:
type_hint = field.type_hint
if type_hint is None: # pragma: no cover
continue
for data_type in field.data_types:
if "." not in data_type.type:
continue
from_, import_ = relative(module_path, data_type.type)
alias = get_uniq_name(import_, used_import_names)
used_import_names.add(import_)
if alias != import_:
alias_map[f"{from_}/{import_}"] = alias
name = data_type.type.rsplit(".", 1)[-1]
pattern = re.compile(rf"\b{re.escape(data_type.type)}\b")
if from_ and import_:
type_hint = pattern.sub(rf"{alias}.{name}", type_hint)
else:
type_hint = pattern.sub(name, type_hint)
field.type_hint = type_hint
for ref_name in model.reference_classes:
from_, import_ = relative(module_path, ref_name)
if init:
from_ += "."
if from_ and import_:
imports.append(
Import(
from_=from_,
import_=import_,
alias=alias_map.get(f"{from_}/{import_}"),
)
)
if with_import:
result += [imports.dump(), self.imports.dump(), "\n"]
code = dump_templates(models)
result += [code]
if self.dump_resolve_reference_action is not None:
result += ["\n", self.dump_resolve_reference_action(models_to_update)]
body = "\n".join(result)
if format_:
body = format_code(body, self.target_python_version)
results[module] = body
# retain existing behaviour
if [*results] == [("__init__.py",)]:
return results[("__init__.py",)]
return results
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def parse_enum(self, name: str, obj: JsonSchemaObject) -> DataModel:
enum_fields = []
for i, enum_part in enumerate(obj.enum): # type: ignore
if obj.type == "string" or (
isinstance(obj.type, list) and "string" in obj.type
):
default = f"'{enum_part}'"
field_name = enum_part
else:
default = enum_part
if obj.x_enum_varnames:
field_name = obj.x_enum_varnames[i]
else:
field_name = f"{obj.type}_{enum_part}"
enum_fields.append(self.data_model_field_type(name=field_name, default=default))
enum = Enum(self.get_class_name(name), fields=enum_fields)
self.append_result(enum)
return enum
|
def parse_enum(self, name: str, obj: JsonSchemaObject) -> DataModel:
enum_fields = []
for i, enum_part in enumerate(obj.enum): # type: ignore
if obj.type == "string":
default = f"'{enum_part}'"
field_name = enum_part
else:
default = enum_part
if obj.x_enum_varnames:
field_name = obj.x_enum_varnames[i]
else:
field_name = f"{obj.type}_{enum_part}"
enum_fields.append(self.data_model_field_type(name=field_name, default=default))
enum = Enum(self.get_class_name(name), fields=enum_fields)
self.append_result(enum)
return enum
|
https://github.com/koxudaxi/datamodel-code-generator/issues/102
|
Traceback (most recent call last):
File "/proj/.venv/bin/datamodel-codegen", line 8, in <module>
sys.exit(main())
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/__main__.py", line 168, in main
result = parser.parse()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/base.py", line 164, in parse
self.parse_raw()
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 472, in parse_raw
self.parse_raw_obj(obj_name, raw_obj)
File "/proj/.venv/lib/python3.7/site-packages/datamodel_code_generator/parser/jsonschema.py", line 455, in parse_raw_obj
obj = JsonSchemaObject.parse_obj(raw)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 402, in parse_obj
return cls(**obj)
File "/proj/.venv/lib/python3.7/site-packages/pydantic/main.py", line 283, in __init__
raise validation_error
pydantic.error_wrappers.ValidationError: 2 validation errors for JsonSchemaObject
properties -> apiVersion -> type
str type expected (type=type_error.str)
properties -> kind -> type
str type expected (type=type_error.str)
|
pydantic.error_wrappers.ValidationError
|
def new_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars module object or ``False``
:rtype: list
"""
metric_vars = False
metric_vars_got = False
if os.path.isfile(metric_vars_file):
logger.info(
"loading metric variables from metric_check_file - %s"
% (str(metric_vars_file))
)
else:
logger.error(
"error :: loading metric variables from metric_check_file - file not found - %s"
% (str(metric_vars_file))
)
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace("\n", "")
no_equal_line = no_new_line.replace(" = ", ",")
array = str(no_equal_line.split(",", 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ["metric", "anomaly_dir", "added_by"]
float_keys = ["value"]
int_keys = ["from_timestamp", "metric_timestamp", "added_at", "full_duration"]
array_keys = ["algorithms", "triggered_algorithms"]
boolean_keys = ["graphite_metric", "run_crucible_tests"]
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", "")
value = str(value_str)
if var_array[0] == "metric":
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", "")
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", "")
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
array_value_str = str(var_array[1]).replace("'", "")
value = literal_eval(str(var_array[1]))
# value = value_str
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == "True":
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
"error :: loading metric variables - none found"
% (str(metric_vars_file))
)
return False
if settings.ENABLE_DEBUG:
logger.info(
"debug :: metric_vars determined - metric variable - metric - %s"
% str(metric_vars.metric)
)
logger.info("debug :: metric_vars for %s" % str(metric))
logger.info("debug :: %s" % str(metric_vars_array))
return metric_vars_array
|
def new_load_metric_vars(metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars module object or ``False``
:rtype: list
"""
metric_vars = False
metric_vars_got = False
if os.path.isfile(metric_vars_file):
logger.info(
"loading metric variables from metric_check_file - %s"
% (str(metric_vars_file))
)
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
add_line = line.replace("\n", "")
metric_vars.append(add_line)
# Bug #1460: panorama check file fails
with open(metric_vars_file) as f:
try:
metric_vars = imp.load_source("metric_vars", "", f)
metric_vars_got = True
except:
current_logger.info(traceback.format_exc())
msg = "failed to import metric variables - metric_check_file"
current_logger.error("error :: %s - %s" % (msg, str(metric_vars_file)))
metric_vars = False
if settings.ENABLE_DEBUG and metric_vars_got:
current_logger.info(
"metric_vars determined - metric variable - metric - %s"
% str(metric_vars.metric)
)
else:
current_logger.error(
"error :: metric_vars_file not found - %s" % (str(metric_vars_file))
)
return metric_vars
|
https://github.com/earthgecko/skyline/issues/24
|
2016-08-22 16:42:05 :: 7874 :: Traceback (most recent call last):
File "/opt/skyline/github/skyline/skyline/panorama/panorama.py", line 297, in spin_process
metric_vars.from_timestamp
AttributeError: 'module' object has no attribute 'from_timestamp'
2016-08-22 16:42:05 :: 7874 :: error :: failed to read from_timestamp variable from check file - /opt/skyline/panaroma/check/1471884121.stats.statsd.graphiteStats.flush_length.txt
|
AttributeError
|
def spin_process(self, i, metric_check_file):
"""
Assign a metric anomaly to process.
:param i: python process id
:param metric_check_file: full path to the metric check file
:return: returns True
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
logger.error(traceback.format_exc())
log_msg = "error :: failed to get MySQL engine in spin_process"
logger.error("error :: failed to get MySQL engine in spin_process")
return None, log_msg, trace
child_process_pid = os.getpid()
logger.info("child_process_pid - %s" % str(child_process_pid))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: processing metric check - %s" % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error(
"error :: file not found - metric_check_file - %s"
% (str(metric_check_file))
)
return
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: check_file_name - %s" % check_file_name)
check_file_timestamp = check_file_name.split(".", 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: check_file_timestamp - %s" % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split(".", 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: check_file_metricname_txt - %s" % check_file_metricname_txt
)
check_file_metricname = check_file_metricname_txt.replace(".txt", "")
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: check_file_metricname - %s" % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace(".", "/")
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: check_file_metricname_dir - %s" % check_file_metricname_dir
)
metric_failed_check_dir = "%s/%s/%s" % (
failed_checks_dir,
check_file_metricname_dir,
check_file_timestamp,
)
failed_check_file = "%s/%s" % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: failed_check_file - %s" % failed_check_file)
try:
# Load and validate metric variables
# @modified 20161231 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error(
"error :: failed to load metric variables from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test if the variables are defined,
# this ensures that if any of the variables are not set for some reason
# we can handle unexpected data or situations gracefully and try and
# ensure that the process does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = "metric"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
metric = str(value_list[0])
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - metric - %s" % metric)
except:
logger.info(traceback.format_exc())
logger.error(
"error :: failed to read metric variable from check file - %s"
% (metric_check_file)
)
metric = None
if not metric:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = "value"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
value = float(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - value - %s" % (value))
except:
logger.error(
"error :: failed to read value variable from check file - %s"
% (metric_check_file)
)
value = None
if not value:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = "from_timestamp"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
from_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: metric variable - from_timestamp - %s" % from_timestamp
)
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error(
"error :: failed to read from_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = "metric_timestamp"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
metric_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: metric variable - metric_timestamp - %s" % metric_timestamp
)
except:
logger.error(
"error :: failed to read metric_timestamp variable from check file - %s"
% (metric_check_file)
)
metric_timestamp = None
if not metric_timestamp:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = "algorithms"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - algorithms - %s" % str(algorithms))
except:
logger.error(
"error :: failed to read algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
algorithms = "all"
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = "triggered_algorithms"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: metric variable - triggered_algorithms - %s"
% str(triggered_algorithms)
)
except:
logger.error(
"error :: failed to read triggered_algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
triggered_algorithms = "all"
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = "added_by"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - added_by - %s" % added_by)
except:
logger.error(
"error :: failed to read added_by variable from check file - %s"
% (metric_check_file)
)
added_by = None
if not added_by:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = "added_at"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
added_at = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - added_at - %s" % added_at)
except:
logger.error(
"error :: failed to read added_at variable from check file setting to all - %s"
% (metric_check_file)
)
added_at = metric_timestamp
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
full_duration = None
try:
# metric_vars.full_duration
# full_duration = str(metric_vars.full_duration)
key = "full_duration"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - full_duration - %s" % full_duration)
except:
logger.error(
"error :: failed to read full_duration variable from check file - %s"
% (metric_check_file)
)
full_duration = None
if not full_duration:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
logger.info(
"Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds discarding"
% (metric, str(anomaly_age), str(max_age_seconds))
)
with open(metric_check_file, "rt") as fr:
metric_check_file_contents = fr.readlines()
logger.info(
"debug :: metric check file contents\n%s"
% (str(metric_check_file_contents))
)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20161222 - ionosphere should extract features for every anomaly
# check that is sent through and calculate a feature_profile ready for
# submission by the user if they so choose. Further ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies, however the feature profiles for subsequent
# anomalies may be similar enough to match a few times and each a closer
# match to the next.
training_metric = False
# Check if the metric has ionosphere_enabled, if not remove the check
# file but not the data directory
# @modified 20161230 - Feature #1830: Ionosphere alerts
# Use SQLAlchemy method
# query = "SELECT ionosphere_enabled FROM metrics WHERE metric='%s'" % metric
# result = mysql_select(skyline_app, query)
# if str(result[0]) != '1':
# logger.info('Ionosphere not enabled on %s' % (metric))
# # @modified 20161222 - do not remove metric file until features
# # calculated
# # self.remove_metric_check_file(str(metric_check_file))
# # return
# training_metric = True
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine to determine ionosphere_enabled"
)
if not engine:
logger.error("error :: engine not obtained to determine ionosphere_enabled")
# Get the metrics_table metadata
metrics_table = None
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info("metrics_table OK for %s" % base_name)
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to get metrics_table meta for %s" % base_name)
metrics_id = None
metric_ionosphere_enabled = None
try:
connection = engine.connect()
# stmt = select([metrics_table.c.ionosphere_enabled]).where(metrics_table.c.metric == str(metric))
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
row = result.fetchone()
metrics_id = row["id"]
metric_ionosphere_enabled = row["ionosphere_enabled"]
connection.close()
if metric_ionosphere_enabled is not None:
training_metric = False
else:
# @modified 20161222 - do not remove metric file until features
# calculated
# self.remove_metric_check_file(str(metric_check_file))
# return
training_metric = True
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not determine ionosphere_enabled from metrics table for - %s"
% base_name
)
metric_ionosphere_enabled = None
training_metric = True
logger.info(
"ionosphere_enabled is %s for metric id %s - %s"
% (str(metric_ionosphere_enabled), str(metrics_id), base_name)
)
if training_metric:
logger.info("Ionosphere is not enabled on %s" % (base_name))
else:
logger.info("Ionosphere is enabled on %s" % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace(".", "/")
metric_training_data_dir = "%s/%s/%s" % (
settings.IONOSPHERE_DATA_FOLDER,
metric_timestamp,
metric_timeseries_dir,
)
anomaly_json = "%s/%s.json" % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info("training data ts json available - %s" % (anomaly_json))
else:
logger.error(
"error :: training data ts json was not found - %s" % (anomaly_json)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info("training metric - %s" % (base_name))
if added_by == "mirage":
logger.info("checking training data Redis json is available")
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visibile" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = "%s/%s.mirage.redis.%sh.json" % (
metric_training_data_dir,
base_name,
full_duration_hours,
)
if os.path.isfile(redis_anomaly_json):
logger.info(
"training data Redis full duration ts json available - %s"
% (redis_anomaly_json)
)
else:
logger.error(
"error :: training data Redis full duration json was not found - %s"
% (redis_anomaly_json)
)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: training data Redis full duration json was not found - %s"
% (redis_anomaly_json)
)
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
# patterns work and the database lay out if defined we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people input first here in many ways, which is
# exactly how it was suppose to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error("error :: metric id not known")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
logger.info("getting MySQL engine")
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error("error :: could not get a MySQL engine to get fp_ids")
if not engine:
logger.error("error :: engine not obtained to get fp_ids")
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(
skyline_app, engine
)
logger.info(log_msg)
logger.info("ionosphere_table OK")
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to get ionosphere_table meta for %s" % base_name
)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
try:
connection = engine.connect()
stmt = select([ionosphere_table]).where(
ionosphere_table.c.metric_id == metrics_id
)
result = connection.execute(stmt)
for row in result:
if int(row["full_duration"]) == int(full_duration):
fp_id = row["id"]
fp_ids.append(int(fp_id))
logger.info(
"using fp id %s matched full_duration %s - %s"
% (str(fp_id), str(full_duration), base_name)
)
else:
logger.info(
"not using fp id %s not matched full_duration %s - %s"
% (str(fp_id), str(full_duration), base_name)
)
connection.close()
fp_count = len(fp_ids)
logger.info("determined %s fp ids for %s" % (str(fp_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not determine fp ids from DB for %s" % base_name
)
fp_count = 0
if len(fp_ids) == 0:
logger.error("error :: there are no fp ids for %s" % base_name)
else:
fp_ids_found = True
# @added 20161221 - TODO: why not calculate the features of every
# anomaly so the the use does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = "%s/%s.tsfresh.input.csv.features.transposed.csv" % (
metric_training_data_dir,
base_name,
)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info("calculated features available - %s" % (calculated_feature_file))
calculated_feature_file_found = True
if not calculated_feature_file_found:
if training_metric:
# Allow Graphite resources to be created if they are not an alert
# was not sent therefore features do not need to be calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len(
[
f
for f in os.listdir(metric_training_data_dir)
if f.endswith(".png")
and os.path.isfile(os.path.join(metric_training_data_dir, f))
]
)
if graphite_file_count == 0:
logger.info(
"not calculating features no anomaly Graphite alert resources created in %s"
% (metric_training_data_dir)
)
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info(
"anomaly Graphite alert resources found in %s"
% (metric_training_data_dir)
)
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
(
fp_csv,
successful,
fp_exists,
fp_id,
log_msg,
traceback_format_exc,
f_calc,
) = calculate_features_profile(
skyline_app, metric_timestamp, base_name, context
)
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to calculate features")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if os.path.isfile(calculated_feature_file):
logger.info("calculated features available - %s" % (calculated_feature_file))
calculated_feature_file_found = True
if isinstance(f_calc, float):
f_calc_time = "%.2f" % f_calc
send_metric_name = (
"%s.features_calculation_time" % skyline_app_graphite_namespace
)
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
if training_metric:
logger.info("training metric done")
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
return
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Calculate features for the current timeseries if there are fp ids
# Just call it via the webapp... fewer lines of code and already done in
# webapp/ionosphere_backend.py and webapp/features_proifle.py
if not calculated_feature_file_found:
webapp_url = "%s/ionosphere?timestamp=%s&metric=%s&calc_features=true" % (
settings.SKYLINE_URL,
metric_timestamp,
base_name,
)
r = None
http_status_code = 0
if settings.WEBAPP_AUTH_ENABLED:
# 10 second timout is sufficient locally under normal circumstances
# as tsfresh has yet to have been take longer than 6 seconds if so
# by the time the next request is made, the features file should
# exist. So this is limited psuedo-idempotency.
timeout_and_auth = "timeout=10, auth=(%s, %s))" % (
settings.WEBAPP_AUTH_USER,
settings.WEBAPP_AUTH_USER_PASSWORD,
)
else:
timeout_and_auth = "timeout=10"
if fp_ids_found:
for _ in range(2):
try:
r = requests.get(webapp_url, timeout_and_auth)
http_status_code = r.status_code
except:
logger.error("error :: could not retrieve %s" % webapp_url)
sleep(5)
continue
else:
break
else:
logger.error(traceback.format_exc())
logger.error(
"error :: could not retrieve %s after 3 tries" % webapp_url
)
if int(http_status_code) == 200:
if os.path.isfile(calculated_feature_file):
logger.info(
"calculated features available - %s" % (calculated_feature_file)
)
calculated_feature_file_found = True
else:
logger.error(
"error :: calculated features not available - %s"
% (calculated_feature_file)
)
# send an Ionosphere alert or add a thunder branch alert, one
# one thing at a time. You cannot rush timeseries.
self.remove_metric_check_file(str(metric_check_file))
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
count_id = 0
with open(calculated_feature_file, "rb") as fr:
reader = csv.reader(fr, delimiter=",")
for i, line in enumerate(reader):
if str(line[0]) != "":
if "," in line[0]:
feature_name = '"%s"' % str(line[0])
else:
feature_name = str(line[0])
count_id += 1
value = float(line[1])
calculated_features.append([feature_name, value])
if len(calculated_features) == 0:
logger.error(
"error :: no calculated features were determined from - %s"
% (calculated_feature_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error("error :: metric id not known")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = "z_fp_%s" % str(metrics_id)
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine for feature_id and values from %s"
% metric_fp_table
)
if not engine:
logger.error(
"error :: engine not obtained for feature_id and values from %s"
% metric_fp_table
)
try:
stmt = "SELECT feature_id, value FROM %s WHERE fp_id=%s" % (
metric_fp_table,
str(fp_id),
)
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row["feature_id"])
fp_value = float(row["value"])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info(
"determined %s features for fp_id %s"
% (str(features_count), str(fp_id))
)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not determine feature_id, value from %s"
% metric_fp_table
)
# Convert feature names in calculated_features to their id
logger.info("converting tsfresh feature names to Skyline feature ids")
calc_features_by_id = []
for feature_name, value in calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(value)])
# Determine what features each data has, extract only values for
# common features.
logger.info("determining common features")
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(value)
# Determine the sum of each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error("error :: mismatch in number of common features")
logger.error(
"error :: relevant_fp_feature_values_count - %s"
% str(relevant_fp_feature_values_count)
)
logger.error(
"error :: relevant_calc_feature_values_count - %s"
% str(relevant_calc_feature_values_count)
)
continue
else:
logger.info(
"comparing on %s common features"
% str(relevant_fp_feature_values_count)
)
if relevant_fp_feature_values_count == 0:
logger.error("error :: relevant_fp_feature_values_count is zero")
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
"sum of the values of the %s common features in features profile - %s"
% (str(relevant_fp_feature_values_count), str(sum_fp_values))
)
logger.info(
"sum of the values of the %s common features in the calculated features - %s"
% (str(relevant_calc_feature_values_count), str(sum_calc_values))
)
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make then both positive
# Sum fp values, Sum calculated - handle negatives like features_sum :: -3389570699080000.0000000000
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make then both positive postive_sums
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
not_anomalous = True
logger.info(
"common features sums are almost equal, not anomalous"
% str(relevant_fp_feature_values_count)
)
percent_different = 100
sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
try:
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.0
percent_different = calc_percent_different[0]
logger.info(
"percent_different between common features sums - %s"
% str(percent_different)
)
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to calculate percent_different")
continue
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info("updating checked details in db for %s" % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine to update checked details in db for %s"
% (str(fp_id))
)
if not engine:
logger.error(
"error :: engine not obtained to update checked details in db for %s"
% (str(fp_id))
)
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(ionosphere_table.c.id == fp_id).values(
checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp,
)
)
connection.close()
logger.info("updated checked_count for %s" % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not update checked_count and last_checked for %s "
% str(fp_id)
)
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
not_anomalous = True
# log
logger.info("not anomalous - features profile match - %s" % base_name)
logger.info(
"calculated features sum are within %s percent of fp_id %s with %s, not anomalous"
% (
str(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR),
str(fp_id),
str(percent_different),
)
)
# update matched_count in ionosphere_table
matched_timestamp = int(time())
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine to update matched details in db for %s"
% (str(fp_id))
)
if not engine:
logger.error(
"error :: engine not obtained to update matched details in db for %s"
% (str(fp_id))
)
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(ionosphere_table.c.id == fp_id).values(
matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp,
)
)
connection.close()
logger.info("updated matched_count for %s" % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not update matched_count and last_matched for %s "
% str(fp_id)
)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info(
"debug :: %s is a features profile for %s" % (str(fp_id), base_name)
)
if not not_anomalous:
logger.info("anomalous - no feature profiles were matched - %s" % base_name)
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed is single quoted intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = "graphite"
panaroma_anomaly_data = (
"metric = '%s'\n"
"value = '%s'\n"
"from_timestamp = '%s'\n"
"metric_timestamp = '%s'\n"
"algorithms = %s\n"
"triggered_algorithms = %s\n"
"app = '%s'\n"
"source = '%s'\n"
"added_by = '%s'\n"
"added_at = '%s'\n"
% (
base_name,
str(value),
from_timestamp,
metric_timestamp,
str(settings.ALGORITHMS),
triggered_algorithms,
skyline_app,
source,
this_host,
added_at,
)
)
# Create an anomaly file with details about the anomaly
panaroma_anomaly_file = "%s/%s.%s.txt" % (
settings.PANORAMA_CHECK_PATH,
added_at,
base_name,
)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, "w", panaroma_anomaly_data
)
logger.info(
"added panorama anomaly file :: %s" % (panaroma_anomaly_file)
)
self.sent_to_panorama.append(base_name)
except:
logger.error(
"error :: failed to add panorama anomaly file :: %s"
% (panaroma_anomaly_file)
)
logger.info(traceback.format_exc())
else:
logger.info(
"not adding panorama anomaly file for Mirage metric - %s" % (metric)
)
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
cache_key = "ionosphere.%s.alert.%s.%s" % (
added_by,
metric_timestamp,
base_name,
)
try:
self.redis_conn.setex(
cache_key,
300,
[
float(value),
base_name,
int(metric_timestamp),
triggered_algorithms,
full_duration,
],
)
logger.info(
"add Redis alert key - %s - [%s, '%s', %s, %s]"
% (
cache_key,
str(value),
base_name,
str(int(metric_timestamp)),
str(triggered_algorithms),
)
)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to add Redis key - %s - [%s, '%s', %s, %s]"
% (
cache_key,
str(value),
base_name,
str(int(metric_timestamp)),
str(triggered_algorithms),
)
)
# TO BE REMOVED
self.remove_metric_check_file(str(metric_check_file))
return
|
def spin_process(self, i, metric_check_file):
"""
Assign a metric anomaly to process.
:param i: python process id
:param metric_check_file: full path to the metric check file
:return: returns True
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
logger.error(traceback.format_exc())
log_msg = "error :: failed to get MySQL engine in spin_process"
logger.error("error :: failed to get MySQL engine in spin_process")
return None, log_msg, trace
child_process_pid = os.getpid()
logger.info("child_process_pid - %s" % str(child_process_pid))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: processing metric check - %s" % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error(
"error :: file not found - metric_check_file - %s"
% (str(metric_check_file))
)
return
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: check_file_name - %s" % check_file_name)
check_file_timestamp = check_file_name.split(".", 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: check_file_timestamp - %s" % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split(".", 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: check_file_metricname_txt - %s" % check_file_metricname_txt
)
check_file_metricname = check_file_metricname_txt.replace(".txt", "")
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: check_file_metricname - %s" % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace(".", "/")
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: check_file_metricname_dir - %s" % check_file_metricname_dir
)
metric_failed_check_dir = "%s/%s/%s" % (
failed_checks_dir,
check_file_metricname_dir,
check_file_timestamp,
)
failed_check_file = "%s/%s" % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: failed_check_file - %s" % failed_check_file)
# Load and validate metric variables
current_metrics_var = None
try:
metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
# @added 20161230 - Panorama check file fails #24
# Could this do it
current_metrics_var = metric_vars
except:
logger.info(traceback.format_exc())
logger.error(
"error :: failed to load metric variables from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test if the variables are defined,
# this ensures that if any of the variables are not set for some reason
# we can handle unexpected data or situations gracefully and try and
# ensure that the process does not hang.
try:
metric_vars.metric
metric = str(metric_vars.metric)
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - metric - %s" % metric)
except:
logger.error(
"error :: failed to read metric variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.value
value = str(metric_vars.value)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - value - %s" % (value))
except:
logger.error(
"error :: failed to read value variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.from_timestamp
from_timestamp = str(metric_vars.from_timestamp)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: metric variable - from_timestamp - %s" % from_timestamp
)
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error(
"error :: failed to read from_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.metric_timestamp
metric_timestamp = str(metric_vars.metric_timestamp)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: metric variable - metric_timestamp - %s" % metric_timestamp
)
except:
logger.error(
"error :: failed to read metric_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.algorithms
algorithms = metric_vars.algorithms
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - algorithms - %s" % str(algorithms))
except:
logger.error(
"error :: failed to read algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
algorithms = "all"
try:
metric_vars.triggered_algorithms
triggered_algorithms = metric_vars.triggered_algorithms
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info(
"debug :: metric variable - triggered_algorithms - %s"
% str(triggered_algorithms)
)
except:
logger.error(
"error :: failed to read triggered_algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
triggered_algorithms = "all"
try:
metric_vars.added_by
added_by = str(metric_vars.added_by)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - added_by - %s" % added_by)
except:
logger.error(
"error :: failed to read added_by variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.added_at
added_at = str(metric_vars.added_at)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - added_at - %s" % added_at)
except:
logger.error(
"error :: failed to read added_at variable from check file setting to all - %s"
% (metric_check_file)
)
added_by = "all"
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
try:
metric_vars.full_duration
full_duration = str(metric_vars.full_duration)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info("debug :: metric variable - full_duration - %s" % full_duration)
except:
logger.error(
"error :: failed to read full_duration variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
logger.info(
"Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds discarding"
% (metric, str(anomaly_age), str(max_age_seconds))
)
with open(metric_check_file, "rt") as fr:
metric_check_file_contents = fr.readlines()
logger.info(
"debug :: metric check file contents\n%s"
% (str(metric_check_file_contents))
)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20161222 - ionosphere should extract features for every anomaly
# check that is sent through and calculate a feature_profile ready for
# submission by the user if they so choose. Further ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies, however the feature profiles for subsequent
# anomalies may be similar enough to match a few times and each a closer
# match to the next.
training_metric = False
# Check if the metric has ionosphere_enabled, if not remove the check
# file but not the data directory
# @modified 20161230 - Feature #1830: Ionosphere alerts
# Use SQLAlchemy method
# query = "SELECT ionosphere_enabled FROM metrics WHERE metric='%s'" % metric
# result = mysql_select(skyline_app, query)
# if str(result[0]) != '1':
# logger.info('Ionosphere not enabled on %s' % (metric))
# # @modified 20161222 - do not remove metric file until features
# # calculated
# # self.remove_metric_check_file(str(metric_check_file))
# # return
# training_metric = True
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine to determine ionosphere_enabled"
)
if not engine:
logger.error("error :: engine not obtained to determine ionosphere_enabled")
# Get the metrics_table metadata
metrics_table = None
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info("metrics_table OK for %s" % base_name)
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to get metrics_table meta for %s" % base_name)
metrics_id = None
metric_ionosphere_enabled = None
try:
connection = engine.connect()
# stmt = select([metrics_table.c.ionosphere_enabled]).where(metrics_table.c.metric == str(metric))
stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
result = connection.execute(stmt)
row = result.fetchone()
metrics_id = row["id"]
metric_ionosphere_enabled = row["ionosphere_enabled"]
connection.close()
if metric_ionosphere_enabled is not None:
training_metric = False
else:
# @modified 20161222 - do not remove metric file until features
# calculated
# self.remove_metric_check_file(str(metric_check_file))
# return
training_metric = True
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not determine ionosphere_enabled from metrics table for - %s"
% base_name
)
metric_ionosphere_enabled = None
training_metric = True
logger.info(
"ionosphere_enabled is %s for metric id %s - %s"
% (str(metric_ionosphere_enabled), str(metrics_id), base_name)
)
if training_metric:
logger.info("Ionosphere is not enabled on %s" % (base_name))
else:
logger.info("Ionosphere is enabled on %s" % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace(".", "/")
metric_training_data_dir = "%s/%s/%s" % (
settings.IONOSPHERE_DATA_FOLDER,
metric_timestamp,
metric_timeseries_dir,
)
anomaly_json = "%s/%s.json" % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info("training data ts json available - %s" % (anomaly_json))
else:
logger.error(
"error :: training data ts json was not found - %s" % (anomaly_json)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info("training metric - %s" % (base_name))
if added_by == "mirage":
logger.info("checking training data Redis json is available")
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visibile" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = "%s/%s.mirage.redis.%sh.json" % (
metric_training_data_dir,
base_name,
full_duration_hours,
)
if os.path.isfile(redis_anomaly_json):
logger.info(
"training data Redis full duration ts json available - %s"
% (redis_anomaly_json)
)
else:
logger.error(
"error :: training data Redis full duration json was not found - %s"
% (redis_anomaly_json)
)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: training data Redis full duration json was not found - %s"
% (redis_anomaly_json)
)
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
# patterns work and the database lay out if defined we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people input first here in many ways, which is
# exactly how it was suppose to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error("error :: metric id not known")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
logger.info("getting MySQL engine")
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error("error :: could not get a MySQL engine to get fp_ids")
if not engine:
logger.error("error :: engine not obtained to get fp_ids")
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(
skyline_app, engine
)
logger.info(log_msg)
logger.info("ionosphere_table OK")
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to get ionosphere_table meta for %s" % base_name
)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
try:
connection = engine.connect()
stmt = select([ionosphere_table]).where(
ionosphere_table.c.metric_id == metrics_id
)
result = connection.execute(stmt)
for row in result:
if int(row["full_duration"]) == int(full_duration):
fp_id = row["id"]
fp_ids.append(int(fp_id))
logger.info(
"using fp id %s matched full_duration %s - %s"
% (str(fp_id), str(full_duration), base_name)
)
else:
logger.info(
"not using fp id %s not matched full_duration %s - %s"
% (str(fp_id), str(full_duration), base_name)
)
connection.close()
fp_count = len(fp_ids)
logger.info("determined %s fp ids for %s" % (str(fp_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not determine fp ids from DB for %s" % base_name
)
fp_count = 0
if len(fp_ids) == 0:
logger.error("error :: there are no fp ids for %s" % base_name)
else:
fp_ids_found = True
# @added 20161221 - TODO: why not calculate the features of every
# anomaly so the the use does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = "%s/%s.tsfresh.input.csv.features.transposed.csv" % (
metric_training_data_dir,
base_name,
)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info("calculated features available - %s" % (calculated_feature_file))
calculated_feature_file_found = True
if not calculated_feature_file_found:
if training_metric:
# Allow Graphite resources to be created if they are not an alert
# was not sent therefore features do not need to be calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len(
[
f
for f in os.listdir(metric_training_data_dir)
if f.endswith(".png")
and os.path.isfile(os.path.join(metric_training_data_dir, f))
]
)
if graphite_file_count == 0:
logger.info(
"not calculating features no anomaly Graphite alert resources created in %s"
% (metric_training_data_dir)
)
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info(
"anomaly Graphite alert resources found in %s"
% (metric_training_data_dir)
)
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
(
fp_csv,
successful,
fp_exists,
fp_id,
log_msg,
traceback_format_exc,
f_calc,
) = calculate_features_profile(
skyline_app, metric_timestamp, base_name, context
)
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to calculate features")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if os.path.isfile(calculated_feature_file):
logger.info("calculated features available - %s" % (calculated_feature_file))
calculated_feature_file_found = True
if isinstance(f_calc, float):
f_calc_time = "%.2f" % f_calc
send_metric_name = (
"%s.features_calculation_time" % skyline_app_graphite_namespace
)
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
if training_metric:
logger.info("training metric done")
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
return
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Calculate features for the current timeseries if there are fp ids
# Just call it via the webapp... fewer lines of code and already done in
# webapp/ionosphere_backend.py and webapp/features_proifle.py
if not calculated_feature_file_found:
webapp_url = "%s/ionosphere?timestamp=%s&metric=%s&calc_features=true" % (
settings.SKYLINE_URL,
metric_timestamp,
base_name,
)
r = None
http_status_code = 0
if settings.WEBAPP_AUTH_ENABLED:
# 10 second timout is sufficient locally under normal circumstances
# as tsfresh has yet to have been take longer than 6 seconds if so
# by the time the next request is made, the features file should
# exist. So this is limited psuedo-idempotency.
timeout_and_auth = "timeout=10, auth=(%s, %s))" % (
settings.WEBAPP_AUTH_USER,
settings.WEBAPP_AUTH_USER_PASSWORD,
)
else:
timeout_and_auth = "timeout=10"
if fp_ids_found:
for _ in range(2):
try:
r = requests.get(webapp_url, timeout_and_auth)
http_status_code = r.status_code
except:
logger.error("error :: could not retrieve %s" % webapp_url)
sleep(5)
continue
else:
break
else:
logger.error(traceback.format_exc())
logger.error(
"error :: could not retrieve %s after 3 tries" % webapp_url
)
if int(http_status_code) == 200:
if os.path.isfile(calculated_feature_file):
logger.info(
"calculated features available - %s" % (calculated_feature_file)
)
calculated_feature_file_found = True
else:
logger.error(
"error :: calculated features not available - %s"
% (calculated_feature_file)
)
# send an Ionosphere alert or add a thunder branch alert, one
# one thing at a time. You cannot rush timeseries.
self.remove_metric_check_file(str(metric_check_file))
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
count_id = 0
with open(calculated_feature_file, "rb") as fr:
reader = csv.reader(fr, delimiter=",")
for i, line in enumerate(reader):
if str(line[0]) != "":
if "," in line[0]:
feature_name = '"%s"' % str(line[0])
else:
feature_name = str(line[0])
count_id += 1
value = float(line[1])
calculated_features.append([feature_name, value])
if len(calculated_features) == 0:
logger.error(
"error :: no calculated features were determined from - %s"
% (calculated_feature_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error("error :: metric id not known")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = "z_fp_%s" % str(metrics_id)
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine for feature_id and values from %s"
% metric_fp_table
)
if not engine:
logger.error(
"error :: engine not obtained for feature_id and values from %s"
% metric_fp_table
)
try:
stmt = "SELECT feature_id, value FROM %s WHERE fp_id=%s" % (
metric_fp_table,
str(fp_id),
)
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row["feature_id"])
fp_value = float(row["value"])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info(
"determined %s features for fp_id %s"
% (str(features_count), str(fp_id))
)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not determine feature_id, value from %s"
% metric_fp_table
)
# Convert feature names in calculated_features to their id
logger.info("converting tsfresh feature names to Skyline feature ids")
calc_features_by_id = []
for feature_name, value in calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(value)])
# Determine what features each data has, extract only values for
# common features.
logger.info("determining common features")
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(value)
# Determine the sum of each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error("error :: mismatch in number of common features")
logger.error(
"error :: relevant_fp_feature_values_count - %s"
% str(relevant_fp_feature_values_count)
)
logger.error(
"error :: relevant_calc_feature_values_count - %s"
% str(relevant_calc_feature_values_count)
)
continue
else:
logger.info(
"comparing on %s common features"
% str(relevant_fp_feature_values_count)
)
if relevant_fp_feature_values_count == 0:
logger.error("error :: relevant_fp_feature_values_count is zero")
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
"sum of the values of the %s common features in features profile - %s"
% (str(relevant_fp_feature_values_count), str(sum_fp_values))
)
logger.info(
"sum of the values of the %s common features in the calculated features - %s"
% (str(relevant_calc_feature_values_count), str(sum_calc_values))
)
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make then both positive
# Sum fp values, Sum calculated - handle negatives like features_sum :: -3389570699080000.0000000000
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make then both positive postive_sums
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
not_anomalous = True
logger.info(
"common features sums are almost equal, not anomalous"
% str(relevant_fp_feature_values_count)
)
percent_different = 100
sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
try:
calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.0
percent_different = calc_percent_different[0]
logger.info(
"percent_different between common features sums - %s"
% str(percent_different)
)
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to calculate percent_different")
continue
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info("updating checked details in db for %s" % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine to update checked details in db for %s"
% (str(fp_id))
)
if not engine:
logger.error(
"error :: engine not obtained to update checked details in db for %s"
% (str(fp_id))
)
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(ionosphere_table.c.id == fp_id).values(
checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp,
)
)
connection.close()
logger.info("updated checked_count for %s" % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not update checked_count and last_checked for %s "
% str(fp_id)
)
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
not_anomalous = True
# log
logger.info("not anomalous - features profile match - %s" % base_name)
logger.info(
"calculated features sum are within %s percent of fp_id %s with %s, not anomalous"
% (
str(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR),
str(fp_id),
str(percent_different),
)
)
# update matched_count in ionosphere_table
matched_timestamp = int(time())
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not get a MySQL engine to update matched details in db for %s"
% (str(fp_id))
)
if not engine:
logger.error(
"error :: engine not obtained to update matched details in db for %s"
% (str(fp_id))
)
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(ionosphere_table.c.id == fp_id).values(
matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp,
)
)
connection.close()
logger.info("updated matched_count for %s" % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: could not update matched_count and last_matched for %s "
% str(fp_id)
)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info(
"debug :: %s is a features profile for %s" % (str(fp_id), base_name)
)
if not not_anomalous:
logger.info("anomalous - no feature profiles were matched - %s" % base_name)
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed is single quoted intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = "graphite"
panaroma_anomaly_data = (
"metric = '%s'\n"
"value = '%s'\n"
"from_timestamp = '%s'\n"
"metric_timestamp = '%s'\n"
"algorithms = %s\n"
"triggered_algorithms = %s\n"
"app = '%s'\n"
"source = '%s'\n"
"added_by = '%s'\n"
"added_at = '%s'\n"
% (
base_name,
str(value),
from_timestamp,
metric_timestamp,
str(settings.ALGORITHMS),
triggered_algorithms,
skyline_app,
source,
this_host,
added_at,
)
)
# Create an anomaly file with details about the anomaly
panaroma_anomaly_file = "%s/%s.%s.txt" % (
settings.PANORAMA_CHECK_PATH,
added_at,
base_name,
)
try:
write_data_to_file(
skyline_app, panaroma_anomaly_file, "w", panaroma_anomaly_data
)
logger.info(
"added panorama anomaly file :: %s" % (panaroma_anomaly_file)
)
self.sent_to_panorama.append(base_name)
except:
logger.error(
"error :: failed to add panorama anomaly file :: %s"
% (panaroma_anomaly_file)
)
logger.info(traceback.format_exc())
else:
logger.info(
"not adding panorama anomaly file for Mirage metric - %s" % (metric)
)
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
cache_key = "ionosphere.%s.alert.%s.%s" % (
added_by,
metric_timestamp,
base_name,
)
try:
self.redis_conn.setex(
cache_key,
300,
[
float(value),
base_name,
int(metric_timestamp),
triggered_algorithms,
full_duration,
],
)
logger.info(
"add Redis alert key - %s - [%s, '%s', %s, %s]"
% (
cache_key,
str(value),
base_name,
str(int(metric_timestamp)),
str(triggered_algorithms),
)
)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to add Redis key - %s - [%s, '%s', %s, %s]"
% (
cache_key,
str(value),
base_name,
str(int(metric_timestamp)),
str(triggered_algorithms),
)
)
# TO BE REMOVED
self.remove_metric_check_file(str(metric_check_file))
return
|
https://github.com/earthgecko/skyline/issues/24
|
2016-08-22 16:42:05 :: 7874 :: Traceback (most recent call last):
File "/opt/skyline/github/skyline/skyline/panorama/panorama.py", line 297, in spin_process
metric_vars.from_timestamp
AttributeError: 'module' object has no attribute 'from_timestamp'
2016-08-22 16:42:05 :: 7874 :: error :: failed to read from_timestamp variable from check file - /opt/skyline/panaroma/check/1471884121.stats.statsd.graphiteStats.flush_length.txt
|
AttributeError
|
def spin_process(self, i, metric_check_file):
"""
Assign a metric anomaly to process.
:param i: python process id
:param metric_check_file: full path to the metric check file
:return: returns True
"""
child_process_pid = os.getpid()
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: child_process_pid - %s" % str(child_process_pid))
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: processing metric check - %s" % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error(
"error :: file not found - metric_check_file - %s"
% (str(metric_check_file))
)
return
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: check_file_name - %s" % check_file_name)
check_file_timestamp = check_file_name.split(".", 1)[0]
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: check_file_timestamp - %s" % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split(".", 1)[1]
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: check_file_metricname_txt - %s" % check_file_metricname_txt
)
check_file_metricname = check_file_metricname_txt.replace(".txt", "")
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: check_file_metricname - %s" % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace(".", "/")
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: check_file_metricname_dir - %s" % check_file_metricname_dir
)
metric_failed_check_dir = "%s/%s/%s" % (
failed_checks_dir,
check_file_metricname_dir,
check_file_timestamp,
)
failed_check_file = "%s/%s" % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: failed_check_file - %s" % failed_check_file)
# Load and validate metric variables
try:
# @modified 20170101 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error(
"error :: failed to load metric variables from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test if the variables are defined,
# this ensures that if any of the variables are not set for some reason
# we can handle unexpected data or situations gracefully and try and
# ensure that the process does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = "metric"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
metric = str(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - metric - %s" % metric)
except:
logger.error(
"error :: failed to read metric variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not metric:
logger.error(
"error :: failed to load metric variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = "value"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
value = float(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - value - %s" % (value))
except:
logger.error(
"error :: failed to read value variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not value:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
from_timestamp = None
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = "from_timestamp"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
from_timestamp = int(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: metric variable - from_timestamp - %s" % from_timestamp
)
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error(
"error :: failed to read from_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not from_timestamp:
logger.error(
"error :: failed to load from_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = "metric_timestamp"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
metric_timestamp = int(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: metric variable - metric_timestamp - %s" % metric_timestamp
)
except:
logger.error(
"error :: failed to read metric_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not metric_timestamp:
logger.error(
"error :: failed to load metric_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
algorithms = None
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = "algorithms"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
algorithms = value_list[0]
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - algorithms - %s" % str(algorithms))
except:
logger.error(
"error :: failed to read algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not algorithms:
logger.error(
"error :: failed to load algorithms variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
triggered_algorithms = None
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = "triggered_algorithms"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
triggered_algorithms = value_list[0]
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: metric variable - triggered_algorithms - %s"
% str(triggered_algorithms)
)
except:
logger.error(
"error :: failed to read triggered_algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not triggered_algorithms:
logger.error(
"error :: failed to load triggered_algorithms variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
app = None
try:
# metric_vars.app
# app = str(metric_vars.app)
key = "app"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
app = str(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - app - %s" % app)
except:
logger.error(
"error :: failed to read app variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not app:
logger.error(
"error :: failed to load app variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
source = None
try:
# metric_vars.source
# source = str(metric_vars.source)
key = "source"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
source = str(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - source - %s" % source)
except:
logger.error(
"error :: failed to read source variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not app:
logger.error(
"error :: failed to load app variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = "added_by"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
added_by = str(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - added_by - %s" % added_by)
except:
logger.error(
"error :: failed to read added_by variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not added_by:
logger.error(
"error :: failed to load added_by variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
added_at = None
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = "added_at"
value_list = [
var_array[1] for var_array in metric_vars_array if var_array[0] == key
]
added_at = str(value_list[0])
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - added_at - %s" % added_at)
except:
logger.error(
"error :: failed to read added_at variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not added_at:
logger.error(
"error :: failed to load added_at variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
record_anomaly = True
cache_key = "%s.last_check.%s.%s" % (skyline_app, app, metric)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: cache_key - %s.last_check.%s.%s" % (skyline_app, app, metric)
)
try:
last_check = self.redis_conn.get(cache_key)
except Exception as e:
logger.error(
"error :: could not query cache_key - %s.last_check.%s.%s - %s"
% (skyline_app, app, metric, e)
)
last_check = None
if last_check:
record_anomaly = False
logger.info(
"Panorama metric key not expired - %s.last_check.%s.%s"
% (skyline_app, app, metric)
)
# @added 20160907 - Handle Panorama stampede on restart after not running #26
# Allow to expire check if greater than PANORAMA_CHECK_MAX_AGE
if max_age:
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
record_anomaly = False
logger.info(
"Panorama check max age exceeded - %s - %s seconds old, older than %s seconds discarding"
% (metric, str(anomaly_age), str(max_age_seconds))
)
if not record_anomaly:
logger.info("not recording anomaly for - %s" % (metric))
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info("metric_check_file removed - %s" % str(metric_check_file))
except OSError:
pass
return
# Determine id of something thing
def determine_id(table, key, value):
"""
Get the id of something from Redis or the database and insert a new
record if one does not exist for the value.
:param table: table name
:param key: key name
:param value: value name
:type table: str
:type key: str
:type value: str
:return: int or boolean
"""
query_cache_key = "%s.mysql_ids.%s.%s.%s" % (skyline_app, table, key, value)
determined_id = None
redis_determined_id = None
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: query_cache_key - %s" % (query_cache_key))
try:
redis_known_id = self.redis_conn.get(query_cache_key)
except:
redis_known_id = None
if redis_known_id:
unpacker = Unpacker(use_list=False)
unpacker.feed(redis_known_id)
redis_determined_id = list(unpacker)
if redis_determined_id:
determined_id = int(redis_determined_id[0])
if determined_id:
if determined_id > 0:
return determined_id
# Query MySQL
query = "select id FROM %s WHERE %s='%s'" % (table, key, value)
results = self.mysql_select(query)
determined_id = 0
if results:
determined_id = int(results[0][0])
if determined_id > 0:
# Set the key for a week
if not redis_determined_id:
try:
self.redis_conn.setex(query_cache_key, 604800, packb(determined_id))
logger.info(
"set redis query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to set query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
return int(determined_id)
# INSERT because no known id
insert_query = "insert into %s (%s) VALUES ('%s')" % (table, key, value)
logger.info("inserting %s into %s table" % (value, table))
try:
results = self.mysql_insert(insert_query)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to determine the id of %s from the insert" % (value)
)
raise
determined_id = 0
if results:
determined_id = int(results)
else:
logger.error("error :: results not set")
raise
if determined_id > 0:
# Set the key for a week
if not redis_determined_id:
try:
self.redis_conn.setex(query_cache_key, 604800, packb(determined_id))
logger.info(
"set redis query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error("%s" % str(e))
logger.error(
"error :: failed to set query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
return determined_id
logger.error("error :: failed to determine the inserted id for %s" % value)
return False
try:
added_by_host_id = determine_id("hosts", "host", added_by)
except:
logger.error("error :: failed to determine id of %s" % (added_by))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
app_id = determine_id("apps", "app", app)
except:
logger.error("error :: failed to determine id of %s" % (app))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
source_id = determine_id("sources", "source", source)
except:
logger.error("error :: failed to determine id of %s" % (source))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
metric_id = determine_id("metrics", "metric", metric)
except:
logger.error("error :: failed to determine id of %s" % (metric))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
algorithms_ids_csv = ""
for algorithm in algorithms:
try:
algorithm_id = determine_id("algorithms", "algorithm", algorithm)
except:
logger.error("error :: failed to determine id of %s" % (algorithm))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
if algorithms_ids_csv == "":
algorithms_ids_csv = str(algorithm_id)
else:
new_algorithms_ids_csv = "%s,%s" % (algorithms_ids_csv, str(algorithm_id))
algorithms_ids_csv = new_algorithms_ids_csv
triggered_algorithms_ids_csv = ""
for triggered_algorithm in triggered_algorithms:
try:
triggered_algorithm_id = determine_id(
"algorithms", "algorithm", triggered_algorithm
)
except:
logger.error(
"error :: failed to determine id of %s" % (triggered_algorithm)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
if triggered_algorithms_ids_csv == "":
triggered_algorithms_ids_csv = str(triggered_algorithm_id)
else:
new_triggered_algorithms_ids_csv = "%s,%s" % (
triggered_algorithms_ids_csv,
str(triggered_algorithm_id),
)
triggered_algorithms_ids_csv = new_triggered_algorithms_ids_csv
logger.info("inserting anomaly")
try:
full_duration = int(metric_timestamp) - int(from_timestamp)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: full_duration - %s" % str(full_duration))
except:
logger.error("error :: failed to determine full_duration")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
anomalous_datapoint = round(float(value), 6)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: anomalous_datapoint - %s" % str(anomalous_datapoint))
except:
logger.error("error :: failed to determine anomalous_datapoint")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
columns = "%s, %s, %s, %s, %s, %s, %s, %s, %s" % (
"metric_id",
"host_id",
"app_id",
"source_id",
"anomaly_timestamp",
"anomalous_datapoint",
"full_duration",
"algorithms_run",
"triggered_algorithms",
)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: columns - %s" % str(columns))
except:
logger.error("error :: failed to construct columns string")
logger.info(traceback.format_exc())
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
query = (
"insert into anomalies (%s) VALUES (%d, %d, %d, %d, %s, %.6f, %d, '%s', '%s')"
% (
columns,
metric_id,
added_by_host_id,
app_id,
source_id,
metric_timestamp,
anomalous_datapoint,
full_duration,
algorithms_ids_csv,
triggered_algorithms_ids_csv,
)
)
except:
logger.error("error :: failed to construct insert query")
logger.info(traceback.format_exc())
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: anomaly insert - %s" % str(query))
try:
anomaly_id = self.mysql_insert(query)
logger.info(
"anomaly id - %d - created for %s at %s"
% (anomaly_id, metric, metric_timestamp)
)
except:
logger.error(
"error :: failed to insert anomaly %s at %s"
% (anomaly_id, metric, metric_timestamp)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
# Set anomaly record cache key
try:
self.redis_conn.setex(cache_key, settings.PANORAMA_EXPIRY_TIME, packb(value))
logger.info(
"set cache_key - %s.last_check.%s.%s - %s"
% (skyline_app, app, metric, str(settings.PANORAMA_EXPIRY_TIME))
)
except Exception as e:
logger.error(
"error :: could not query cache_key - %s.last_check.%s.%s - %s"
% (skyline_app, app, metric, e)
)
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info("metric_check_file removed - %s" % str(metric_check_file))
except OSError:
pass
return anomaly_id
|
def spin_process(self, i, metric_check_file):
"""
Assign a metric anomaly to process.
:param i: python process id
:param metric_check_file: full path to the metric check file
:return: returns True
"""
child_process_pid = os.getpid()
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: child_process_pid - %s" % str(child_process_pid))
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: processing metric check - %s" % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error(
"error :: file not found - metric_check_file - %s"
% (str(metric_check_file))
)
return
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: check_file_name - %s" % check_file_name)
check_file_timestamp = check_file_name.split(".", 1)[0]
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: check_file_timestamp - %s" % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split(".", 1)[1]
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: check_file_metricname_txt - %s" % check_file_metricname_txt
)
check_file_metricname = check_file_metricname_txt.replace(".txt", "")
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: check_file_metricname - %s" % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace(".", "/")
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: check_file_metricname_dir - %s" % check_file_metricname_dir
)
metric_failed_check_dir = "%s/%s/%s" % (
failed_checks_dir,
check_file_metricname_dir,
check_file_timestamp,
)
failed_check_file = "%s/%s" % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: failed_check_file - %s" % failed_check_file)
# Load and validate metric variables
try:
metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error(
"error :: failed to load metric variables from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
# We use a pythonic methodology to test if the variables are defined,
# this ensures that if any of the variables are not set for some reason
# we can handle unexpected data or situations gracefully and try and
# ensure that the process does not hang.
try:
metric_vars.metric
metric = str(metric_vars.metric)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - metric - %s" % metric)
except:
logger.error(
"error :: failed to read metric variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.value
value = str(metric_vars.value)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - value - %s" % (value))
except:
logger.error(
"error :: failed to read value variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.from_timestamp
from_timestamp = str(metric_vars.from_timestamp)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: metric variable - from_timestamp - %s" % from_timestamp
)
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error(
"error :: failed to read from_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.metric_timestamp
metric_timestamp = str(metric_vars.metric_timestamp)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: metric variable - metric_timestamp - %s" % metric_timestamp
)
except:
logger.error(
"error :: failed to read metric_timestamp variable from check file - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.algorithms
algorithms = metric_vars.algorithms
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - algorithms - %s" % str(algorithms))
except:
logger.error(
"error :: failed to read algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.triggered_algorithms
triggered_algorithms = metric_vars.triggered_algorithms
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: metric variable - triggered_algorithms - %s"
% str(triggered_algorithms)
)
except:
logger.error(
"error :: failed to read triggered_algorithms variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.app
app = str(metric_vars.app)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - app - %s" % app)
except:
logger.error(
"error :: failed to read app variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.source
source = str(metric_vars.source)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - source - %s" % source)
except:
logger.error(
"error :: failed to read source variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.added_by
added_by = str(metric_vars.added_by)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - added_by - %s" % added_by)
except:
logger.error(
"error :: failed to read added_by variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
metric_vars.added_at
added_at = str(metric_vars.added_at)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: metric variable - added_at - %s" % added_at)
except:
logger.error(
"error :: failed to read added_at variable from check file setting to all - %s"
% (metric_check_file)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
record_anomaly = True
cache_key = "%s.last_check.%s.%s" % (skyline_app, app, metric)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info(
"debug :: cache_key - %s.last_check.%s.%s" % (skyline_app, app, metric)
)
try:
last_check = self.redis_conn.get(cache_key)
except Exception as e:
logger.error(
"error :: could not query cache_key - %s.last_check.%s.%s - %s"
% (skyline_app, app, metric, e)
)
last_check = None
if last_check:
record_anomaly = False
logger.info(
"Panorama metric key not expired - %s.last_check.%s.%s"
% (skyline_app, app, metric)
)
# @added 20160907 - Handle Panorama stampede on restart after not running #26
# Allow to expire check if greater than PANORAMA_CHECK_MAX_AGE
if max_age:
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
record_anomaly = False
logger.info(
"Panorama check max age exceeded - %s - %s seconds old, older than %s seconds discarding"
% (metric, str(anomaly_age), str(max_age_seconds))
)
if not record_anomaly:
logger.info("not recording anomaly for - %s" % (metric))
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info("metric_check_file removed - %s" % str(metric_check_file))
except OSError:
pass
return
# Determine id of something thing
def determine_id(table, key, value):
"""
Get the id of something from Redis or the database and insert a new
record if one does not exist for the value.
:param table: table name
:param key: key name
:param value: value name
:type table: str
:type key: str
:type value: str
:return: int or boolean
"""
query_cache_key = "%s.mysql_ids.%s.%s.%s" % (skyline_app, table, key, value)
determined_id = None
redis_determined_id = None
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: query_cache_key - %s" % (query_cache_key))
try:
redis_known_id = self.redis_conn.get(query_cache_key)
except:
redis_known_id = None
if redis_known_id:
unpacker = Unpacker(use_list=False)
unpacker.feed(redis_known_id)
redis_determined_id = list(unpacker)
if redis_determined_id:
determined_id = int(redis_determined_id[0])
if determined_id:
if determined_id > 0:
return determined_id
# Query MySQL
query = "select id FROM %s WHERE %s='%s'" % (table, key, value)
results = self.mysql_select(query)
determined_id = 0
if results:
determined_id = int(results[0][0])
if determined_id > 0:
# Set the key for a week
if not redis_determined_id:
try:
self.redis_conn.setex(query_cache_key, 604800, packb(determined_id))
logger.info(
"set redis query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to set query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
return int(determined_id)
# INSERT because no known id
insert_query = "insert into %s (%s) VALUES ('%s')" % (table, key, value)
logger.info("inserting %s into %s table" % (value, table))
try:
results = self.mysql_insert(insert_query)
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to determine the id of %s from the insert" % (value)
)
raise
determined_id = 0
if results:
determined_id = int(results)
else:
logger.error("error :: results not set")
raise
if determined_id > 0:
# Set the key for a week
if not redis_determined_id:
try:
self.redis_conn.setex(query_cache_key, 604800, packb(determined_id))
logger.info(
"set redis query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
except Exception as e:
logger.error(traceback.format_exc())
logger.error("%s" % str(e))
logger.error(
"error :: failed to set query_cache_key - %s - id: %s"
% (query_cache_key, str(determined_id))
)
return determined_id
logger.error("error :: failed to determine the inserted id for %s" % value)
return False
try:
added_by_host_id = determine_id("hosts", "host", added_by)
except:
logger.error("error :: failed to determine id of %s" % (added_by))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
app_id = determine_id("apps", "app", app)
except:
logger.error("error :: failed to determine id of %s" % (app))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
source_id = determine_id("sources", "source", source)
except:
logger.error("error :: failed to determine id of %s" % (source))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
metric_id = determine_id("metrics", "metric", metric)
except:
logger.error("error :: failed to determine id of %s" % (metric))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
algorithms_ids_csv = ""
for algorithm in algorithms:
try:
algorithm_id = determine_id("algorithms", "algorithm", algorithm)
except:
logger.error("error :: failed to determine id of %s" % (algorithm))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
if algorithms_ids_csv == "":
algorithms_ids_csv = str(algorithm_id)
else:
new_algorithms_ids_csv = "%s,%s" % (algorithms_ids_csv, str(algorithm_id))
algorithms_ids_csv = new_algorithms_ids_csv
triggered_algorithms_ids_csv = ""
for triggered_algorithm in triggered_algorithms:
try:
triggered_algorithm_id = determine_id(
"algorithms", "algorithm", triggered_algorithm
)
except:
logger.error(
"error :: failed to determine id of %s" % (triggered_algorithm)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
if triggered_algorithms_ids_csv == "":
triggered_algorithms_ids_csv = str(triggered_algorithm_id)
else:
new_triggered_algorithms_ids_csv = "%s,%s" % (
triggered_algorithms_ids_csv,
str(triggered_algorithm_id),
)
triggered_algorithms_ids_csv = new_triggered_algorithms_ids_csv
logger.info("inserting anomaly")
try:
full_duration = int(metric_timestamp) - int(from_timestamp)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: full_duration - %s" % str(full_duration))
except:
logger.error("error :: failed to determine full_duration")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
anomalous_datapoint = round(float(value), 6)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: anomalous_datapoint - %s" % str(anomalous_datapoint))
except:
logger.error("error :: failed to determine anomalous_datapoint")
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
columns = "%s, %s, %s, %s, %s, %s, %s, %s, %s" % (
"metric_id",
"host_id",
"app_id",
"source_id",
"anomaly_timestamp",
"anomalous_datapoint",
"full_duration",
"algorithms_run",
"triggered_algorithms",
)
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: columns - %s" % str(columns))
except:
logger.error("error :: failed to construct columns string")
logger.info(traceback.format_exc())
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
try:
query = (
"insert into anomalies (%s) VALUES (%d, %d, %d, %d, %s, %.6f, %d, '%s', '%s')"
% (
columns,
metric_id,
added_by_host_id,
app_id,
source_id,
metric_timestamp,
anomalous_datapoint,
full_duration,
algorithms_ids_csv,
triggered_algorithms_ids_csv,
)
)
except:
logger.error("error :: failed to construct insert query")
logger.info(traceback.format_exc())
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
if settings.ENABLE_PANORAMA_DEBUG:
logger.info("debug :: anomaly insert - %s" % str(query))
try:
anomaly_id = self.mysql_insert(query)
logger.info(
"anomaly id - %d - created for %s at %s"
% (anomaly_id, metric, metric_timestamp)
)
except:
logger.error(
"error :: failed to insert anomaly %s at %s"
% (anomaly_id, metric, metric_timestamp)
)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return False
# Set anomaly record cache key
try:
self.redis_conn.setex(cache_key, settings.PANORAMA_EXPIRY_TIME, packb(value))
logger.info(
"set cache_key - %s.last_check.%s.%s - %s"
% (skyline_app, app, metric, str(settings.PANORAMA_EXPIRY_TIME))
)
except Exception as e:
logger.error(
"error :: could not query cache_key - %s.last_check.%s.%s - %s"
% (skyline_app, app, metric, e)
)
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info("metric_check_file removed - %s" % str(metric_check_file))
except OSError:
pass
return anomaly_id
|
https://github.com/earthgecko/skyline/issues/24
|
2016-08-22 16:42:05 :: 7874 :: Traceback (most recent call last):
File "/opt/skyline/github/skyline/skyline/panorama/panorama.py", line 297, in spin_process
metric_vars.from_timestamp
AttributeError: 'module' object has no attribute 'from_timestamp'
2016-08-22 16:42:05 :: 7874 :: error :: failed to read from_timestamp variable from check file - /opt/skyline/panaroma/check/1471884121.stats.statsd.graphiteStats.flush_length.txt
|
AttributeError
|
def ionosphere_metric_data(requested_timestamp, data_for_metric, context):
"""
Get a list of all training data folders and metrics
"""
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Feature #1830: Ionosphere alerts
# Use the new_load_metric_vars method
def new_load_metric_vars(metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars module object or ``False``
:rtype: list
"""
if path.isfile(metric_vars_file):
logger.info(
"loading metric variables from metric_check_file - %s"
% (str(metric_vars_file))
)
else:
logger.error(
"error :: loading metric variables from metric_check_file - file not found - %s"
% (str(metric_vars_file))
)
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace("\n", "")
no_equal_line = no_new_line.replace(" = ", ",")
array = str(no_equal_line.split(",", 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ["metric", "anomaly_dir", "added_by", "app", "source"]
float_keys = ["value"]
int_keys = ["from_timestamp", "metric_timestamp", "added_at", "full_duration"]
array_keys = ["algorithms", "triggered_algorithms"]
boolean_keys = ["graphite_metric", "run_crucible_tests"]
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", "")
value = str(value_str)
if var_array[0] == "metric":
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", "")
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", "")
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == "True":
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
logger.error(
"error :: loading metric variables - none found"
% (str(metric_vars_file))
)
return False
if settings.ENABLE_DEBUG:
logger.info(
"debug :: metric_vars determined - metric variable - metric - %s"
% str(metric_vars.metric)
)
logger.info("debug :: metric_vars for %s" % str(metric))
logger.info("debug :: %s" % str(metric_vars_array))
return metric_vars_array
base_name = data_for_metric.replace(settings.FULL_NAMESPACE, "", 1)
if context == "training_data":
log_context = "training data"
if context == "features_profiles":
log_context = "features profile data"
logger.info(
"%s requested for %s at %s"
% (context, str(base_name), str(requested_timestamp))
)
metric_paths = []
images = []
timeseries_dir = base_name.replace(".", "/")
if context == "training_data":
metric_data_dir = "%s/%s/%s" % (
settings.IONOSPHERE_DATA_FOLDER,
str(requested_timestamp),
timeseries_dir,
)
if context == "features_profiles":
metric_data_dir = "%s/%s/%s" % (
settings.IONOSPHERE_PROFILES_FOLDER,
timeseries_dir,
str(requested_timestamp),
)
human_date = time.strftime(
"%Y-%m-%d %H:%M:%S %Z (%A)", time.localtime(int(requested_timestamp))
)
metric_var_filename = "%s.txt" % str(base_name)
metric_vars_file = False
ts_json_filename = "%s.json" % str(base_name)
ts_json_file = "none"
td_files = listdir(metric_data_dir)
for i_file in td_files:
metric_file = path.join(metric_data_dir, i_file)
metric_paths.append([i_file, metric_file])
if i_file.endswith(".png"):
images.append(str(metric_file))
if i_file == metric_var_filename:
metric_vars_file = str(metric_file)
if i_file == ts_json_filename:
ts_json_file = str(metric_file)
metric_vars_ok = False
metric_vars = ["error: could not read metrics vars file", metric_vars_file]
if path.isfile(metric_vars_file):
try:
# @modified 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Feature #1830: Ionosphere alerts
# Use the new_load_metric_vars method
# metric_vars = []
# with open(metric_vars_file) as f:
# for line in f:
# add_line = line.replace('\n', '')
# metric_vars.append(add_line)
metric_vars = new_load_metric_vars(metric_vars_file)
metric_vars_ok = True
except:
metric_vars_ok = False
logger.error(traceback.format_exc())
logger.error(
"error :: failed to load metric_vars from: %s" % str(metric_vars_file)
)
ts_json_ok = False
ts_json = ["error: could not timeseries json file", ts_json_file]
if path.isfile(ts_json_file):
try:
ts_json = []
with open(ts_json_file) as f:
for line in f:
ts_json.append(line)
ts_json_ok = True
except:
ts_json_ok = False
data_to_process = False
if metric_vars_ok and ts_json_ok:
data_to_process = True
panorama_anomaly_id = False
url = (
"%s/panorama?metric=%s&from_timestamp=%s&until_timestamp=%s&panorama_anomaly_id=true"
% (
settings.SKYLINE_URL,
str(base_name),
str(requested_timestamp),
str(requested_timestamp),
)
)
panorama_resp = None
logger.info("getting anomaly id from panorama: %s" % str(url))
if settings.WEBAPP_AUTH_ENABLED:
user = str(settings.WEBAPP_AUTH_USER)
password = str(settings.WEBAPP_AUTH_USER_PASSWORD)
try:
if settings.WEBAPP_AUTH_ENABLED:
r = requests.get(url, timeout=2, auth=(user, password))
else:
r = requests.get(url, timeout=2)
panorama_resp = True
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to get anomaly id from panorama: %s" % str(url))
if panorama_resp:
try:
data = literal_eval(r.text)
if str(data) == "[]":
panorama_anomaly_id = None
logger.debug("debug :: panorama anomlay data: %s" % str(data))
else:
panorama_anomaly_id = int(data[0][0])
logger.debug("debug :: panorama anomlay data: %s" % str(data))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to get anomaly id from panorama response: %s"
% str(r.text)
)
return (
metric_paths,
images,
human_date,
metric_vars,
ts_json,
data_to_process,
panorama_anomaly_id,
)
|
def ionosphere_metric_data(requested_timestamp, data_for_metric, context):
"""
Get a list of all training data folders and metrics
"""
base_name = data_for_metric.replace(settings.FULL_NAMESPACE, "", 1)
if context == "training_data":
log_context = "training data"
if context == "features_profiles":
log_context = "features profile data"
logger.info(
"%s requested for %s at %s"
% (context, str(base_name), str(requested_timestamp))
)
metric_paths = []
images = []
timeseries_dir = base_name.replace(".", "/")
if context == "training_data":
metric_data_dir = "%s/%s/%s" % (
settings.IONOSPHERE_DATA_FOLDER,
str(requested_timestamp),
timeseries_dir,
)
if context == "features_profiles":
metric_data_dir = "%s/%s/%s" % (
settings.IONOSPHERE_PROFILES_FOLDER,
timeseries_dir,
str(requested_timestamp),
)
human_date = time.strftime(
"%Y-%m-%d %H:%M:%S %Z (%A)", time.localtime(int(requested_timestamp))
)
metric_var_filename = "%s.txt" % str(base_name)
metric_vars_file = False
ts_json_filename = "%s.json" % str(base_name)
ts_json_file = "none"
td_files = listdir(metric_data_dir)
for i_file in td_files:
metric_file = path.join(metric_data_dir, i_file)
metric_paths.append([i_file, metric_file])
if i_file.endswith(".png"):
images.append(str(metric_file))
if i_file == metric_var_filename:
metric_vars_file = str(metric_file)
if i_file == ts_json_filename:
ts_json_file = str(metric_file)
metric_vars_ok = False
metric_vars = ["error: could not read metrics vars file", metric_vars_file]
if path.isfile(metric_vars_file):
try:
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
add_line = line.replace("\n", "")
metric_vars.append(add_line)
metric_vars_ok = True
except:
metric_vars_ok = False
ts_json_ok = False
ts_json = ["error: could not timeseries json file", ts_json_file]
if path.isfile(ts_json_file):
try:
ts_json = []
with open(ts_json_file) as f:
for line in f:
ts_json.append(line)
ts_json_ok = True
except:
ts_json_ok = False
data_to_process = False
if metric_vars_ok and ts_json_ok:
data_to_process = True
panorama_anomaly_id = False
url = (
"%s/panorama?metric=%s&from_timestamp=%s&until_timestamp=%s&panorama_anomaly_id=true"
% (
settings.SKYLINE_URL,
str(base_name),
str(requested_timestamp),
str(requested_timestamp),
)
)
panorama_resp = None
logger.info("getting anomaly id from panorama: %s" % str(url))
if settings.WEBAPP_AUTH_ENABLED:
user = str(settings.WEBAPP_AUTH_USER)
password = str(settings.WEBAPP_AUTH_USER_PASSWORD)
try:
if settings.WEBAPP_AUTH_ENABLED:
r = requests.get(url, timeout=2, auth=(user, password))
else:
r = requests.get(url, timeout=2)
panorama_resp = True
except:
logger.error(traceback.format_exc())
logger.error("error :: failed to get anomaly id from panorama: %s" % str(url))
if panorama_resp:
try:
data = literal_eval(r.text)
if str(data) == "[]":
panorama_anomaly_id = None
logger.debug("debug :: panorama anomlay data: %s" % str(data))
else:
panorama_anomaly_id = int(data[0][0])
logger.debug("debug :: panorama anomlay data: %s" % str(data))
except:
logger.error(traceback.format_exc())
logger.error(
"error :: failed to get anomaly id from panorama response: %s"
% str(r.text)
)
return (
metric_paths,
images,
human_date,
metric_vars,
ts_json,
data_to_process,
panorama_anomaly_id,
)
|
https://github.com/earthgecko/skyline/issues/24
|
2016-08-22 16:42:05 :: 7874 :: Traceback (most recent call last):
File "/opt/skyline/github/skyline/skyline/panorama/panorama.py", line 297, in spin_process
metric_vars.from_timestamp
AttributeError: 'module' object has no attribute 'from_timestamp'
2016-08-22 16:42:05 :: 7874 :: error :: failed to read from_timestamp variable from check file - /opt/skyline/panaroma/check/1471884121.stats.statsd.graphiteStats.flush_length.txt
|
AttributeError
|
def fetch_production(
zone_key="CR",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
):
# ensure we have an arrow object.
# if no target_datetime is specified, this defaults to now.
target_datetime = arrow.get(target_datetime).to(TIMEZONE)
# if before 01:30am on the current day then fetch previous day due to
# data lag.
today = arrow.get().to(TIMEZONE).date()
if target_datetime.date() == today:
target_datetime = (
target_datetime
if target_datetime.time() >= dt.time(1, 30)
else target_datetime.shift(days=-1)
)
if target_datetime < arrow.get("2012-07-01"):
# data availability limit found by manual trial and error
logger.error(
"CR API does not provide data before 2012-07-01, {} was requested".format(
target_datetime
),
extra={"key": zone_key},
)
return None
# Do not use existing session as some amount of cache is taking place
r = requests.session()
url = "https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf"
response = r.get(url)
soup = BeautifulSoup(response.text, "html.parser")
jsf_view_state = soup.find("input", {"name": "javax.faces.ViewState"})["value"]
data = [
("formPosdespacho:txtFechaInicio_input", target_datetime.format(DATE_FORMAT)),
("formPosdespacho:pickFecha", ""),
("formPosdespacho_SUBMIT", 1),
("javax.faces.ViewState", jsf_view_state),
]
response = r.post(url, data=data)
# tell pandas which table to use by providing CHARACTERISTIC_NAME
df = pd.read_html(
response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0
)[0]
results = df_to_data(zone_key, target_datetime, df, logger)
return results
|
def fetch_production(
zone_key="CR",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
):
# ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
target_datetime = arrow.get(target_datetime).to(TIMEZONE)
if target_datetime < arrow.get("2012-07-01"):
# data availability limit found by manual trial and error
logger.error(
"CR API does not provide data before 2012-07-01, {} was requested".format(
target_datetime
),
extra={"key": zone_key},
)
return None
# Do not use existing session as some amount of cache is taking place
r = requests.session()
url = "https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf"
response = r.get(url)
soup = BeautifulSoup(response.text, "html.parser")
jsf_view_state = soup.find("input", {"name": "javax.faces.ViewState"})["value"]
data = [
("formPosdespacho:txtFechaInicio_input", target_datetime.format(DATE_FORMAT)),
("formPosdespacho:pickFecha", ""),
("formPosdespacho_SUBMIT", 1),
("javax.faces.ViewState", jsf_view_state),
]
response = r.post(url, data=data)
# tell pandas which table to use by providing CHARACTERISTIC_NAME
df = pd.read_html(
response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0
)[0]
results = df_to_data(zone_key, target_datetime, df, logger)
return results
|
https://github.com/tmrowco/electricitymap-contrib/issues/1477
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 130, in launch_parsers
**parser_kwargs)
File "/home/electricitymap/parsers/CR.py", line 191, in fetch_production
df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
File "/usr/local/lib/python3.6/site-packages/pandas/io/html.py", line 915, in read_html
keep_default_na=keep_default_na)
File "/usr/local/lib/python3.6/site-packages/pandas/io/html.py", line 749, in _parse
raise_with_traceback(retained)
File "/usr/local/lib/python3.6/site-packages/pandas/compat/__init__.py", line 385, in raise_with_traceback
raise exc.with_traceback(traceback)
ValueError: No tables found matching pattern 'Angostura'
|
ValueError
|
def df_to_data(zone_key, day, df, logger):
df = df.dropna(axis=1, how="any")
# Check for empty dataframe
if df.shape == (1, 1):
return []
df = df.drop(["Intercambio Sur", "Intercambio Norte", "Total"], errors="ignore")
df = df.iloc[:, :-1]
results = []
unknown_plants = set()
hour = 0
for column in df:
data = empty_record(zone_key)
data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime
for index, value in df[column].items():
source = POWER_PLANTS.get(index)
if not source:
source = "unknown"
unknown_plants.add(index)
data["datetime"] = data_time
data["production"][source] += max(0.0, value)
hour += 1
results.append(data)
for plant in unknown_plants:
logger.warning(
"{} is not mapped to generation type".format(plant), extra={"key": zone_key}
)
return results
|
def df_to_data(zone_key, day, df, logger):
df = df.dropna(axis=1, how="any")
# Check for empty dataframe
if df.shape == (1, 1):
return []
df = df.drop(["Intercambio Sur", "Intercambio Norte", "Total"], errors="ignore")
df = df.iloc[:, :-1]
results = []
unknown_plants = set()
hour = 0
for column in df:
data = empty_record(zone_key)
data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime
for index, value in df[column].items():
source = POWER_PLANTS.get(index)
if not source:
source = "unknown"
unknown_plants.add(index)
data["datetime"] = data_time
data["production"][source] += max(0.0, value)
hour += 1
results.append(data)
for plant in unknown_plants:
logger.warning(
"{} is not mapped to generation type".format(plant), extra={"key": zone_key}
)
return results
|
https://github.com/tmrowco/electricitymap-contrib/issues/1561
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 131, in launch_parsers
**parser_kwargs)
File "/home/contrib/parsers/CR.py", line 178, in fetch_production
jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
IndexError: list index out of range
|
IndexError
|
def fetch_production(
zone_key="CR",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
):
# ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
target_datetime = arrow.get(target_datetime).to(TIMEZONE)
if target_datetime < arrow.get("2012-07-01"):
# data availability limit found by manual trial and error
logger.error(
"CR API does not provide data before 2012-07-01, {} was requested".format(
target_datetime
),
extra={"key": zone_key},
)
return None
# Do not use existing session as some amount of cache is taking place
r = requests.session()
url = "https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf"
response = r.get(url)
soup = BeautifulSoup(response.text, "html.parser")
jsf_view_state = soup.find("input", {"name": "javax.faces.ViewState"})["value"]
data = [
("formPosdespacho:txtFechaInicio_input", target_datetime.format(DATE_FORMAT)),
("formPosdespacho:pickFecha", ""),
("formPosdespacho_SUBMIT", 1),
("javax.faces.ViewState", jsf_view_state),
]
response = r.post(url, data=data)
# tell pandas which table to use by providing CHARACTERISTIC_NAME
df = pd.read_html(
response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0
)[0]
results = df_to_data(zone_key, target_datetime, df, logger)
return results
|
def fetch_production(
zone_key="CR",
session=None,
target_datetime=None,
logger=logging.getLogger(__name__),
):
# ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
target_datetime = arrow.get(target_datetime).to(TIMEZONE)
if target_datetime < arrow.get("2012-07-01"):
# data availability limit found by manual trial and error
logger.error(
"CR API does not provide data before 2012-07-01, {} was requested".format(
target_datetime
),
extra={"key": zone_key},
)
return None
# Do not use existing session as some amount of cache is taking place
r = requests.session()
url = "https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf"
response = r.get(url)
soup = BeautifulSoup(response.text, "html.parser")
jsf_view_state = soup.select("#javax.faces.ViewState")[0]["value"]
data = [
("formPosdespacho", "formPosdespacho"),
("formPosdespacho:txtFechaInicio_input", target_datetime.format(DATE_FORMAT)),
("formPosdespacho:pickFecha", ""),
("formPosdespacho:j_idt60_selection", ""),
("formPosdespacho:j_idt60_scrollState", "0,1915"),
("javax.faces.ViewState", jsf_view_state),
]
response = r.post(url, cookies={}, data=data)
# tell pandas which table to use by providing CHARACTERISTIC_NAME
df = pd.read_html(
response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0
)[0]
results = df_to_data(zone_key, target_datetime, df, logger)
return results
|
https://github.com/tmrowco/electricitymap-contrib/issues/1561
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 131, in launch_parsers
**parser_kwargs)
File "/home/contrib/parsers/CR.py", line 178, in fetch_production
jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
IndexError: list index out of range
|
IndexError
|
def get_data(session=None):
"""
Makes two requests for the current generation total and fuel mix.
Parses the data into raw form and reads time string associated with it.
Checks that fuel mix sum is equal to generation total.
Returns a tuple.
"""
s = session or requests.Session()
mixreq = s.get(fuel_mix_url)
genreq = s.get(current_gen_url)
mixsoup = BeautifulSoup(mixreq.content, "html.parser")
gensoup = BeautifulSoup(genreq.content, "html.parser")
try:
gen_mw = gensoup.find("td", text="MW")
ts_tag = gen_mw.findNext("td")
real_ts = ts_tag.text
gen_total = float(ts_tag.findNext("td").text)
except AttributeError:
# No data is available between 12am-1am.
raise ValueError("No production data is currently available for West Malaysia.")
mix_header = mixsoup.find("tr", {"class": "gridheader"})
mix_table = mix_header.find_parent("table")
rows = mix_table.find_all("tr")
generation_mix = {}
for row in rows[1:]:
cells = row.find_all("td")
items = [ele.text.strip() for ele in cells]
generation_mix[items[0]] = float(items[1])
if sum(generation_mix.values()) == gen_total:
# Fuel mix matches generation.
return real_ts, generation_mix
else:
raise ValueError("Malaysia generation and fuel mix totals are not equal!")
|
def get_data(session=None):
"""
Makes two requests for the current generation total and fuel mix.
Parses the data into raw form and reads time string associated with it.
Checks that fuel mix sum is equal to generation total.
Returns a tuple.
"""
s = session or requests.Session()
mixreq = s.get(fuel_mix_url)
genreq = s.get(current_gen_url)
mixsoup = BeautifulSoup(mixreq.content, "html.parser")
gensoup = BeautifulSoup(genreq.content, "html.parser")
try:
gen_mw = gensoup.find("td", text="MW")
ts_tag = gen_mw.findNext("td")
real_ts = ts_tag.text
gen_total = float(ts_tag.findNext("td").text)
except AttributeError:
# No data is available between 12am-1am.
raise ValueError("No data is currently available for Malaysia.")
mix_header = mixsoup.find("tr", {"class": "gridheader"})
mix_table = mix_header.find_parent("table")
rows = mix_table.find_all("tr")
generation_mix = {}
for row in rows[1:]:
cells = row.find_all("td")
items = [ele.text.strip() for ele in cells]
generation_mix[items[0]] = float(items[1])
if sum(generation_mix.values()) == gen_total:
# Fuel mix matches generation.
return real_ts, generation_mix
else:
raise ValueError("Malaysia generation and fuel mix totals are not equal!")
|
https://github.com/tmrowco/electricitymap-contrib/issues/1473
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 130, in launch_parsers
**parser_kwargs)
File "/home/electricitymap/parsers/MY_WM.py", line 309, in fetch_exchange
hvdc_hidden_values = extract_hidden_values(hvdc_switch_req)
File "/home/electricitymap/parsers/MY_WM.py", line 176, in extract_hidden_values
viewstategenerator = soup.find("input", attrs={'id': '__VIEWSTATEGENERATOR'})['value']
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def extract_hidden_values(req):
"""
Gets current aspx page values to enable post requests to be sent.
Returns a dictionary.
"""
soup = BeautifulSoup(req.content, "html.parser")
# Find and define parameters needed to send a POST request.
try:
viewstategenerator = soup.find("input", attrs={"id": "__VIEWSTATEGENERATOR"})[
"value"
]
viewstate = soup.find("input", attrs={"id": "__VIEWSTATE"})["value"]
eventvalidation = soup.find("input", attrs={"id": "__EVENTVALIDATION"})["value"]
jschartviewerstate = soup.find(
"input", attrs={"id": "MainContent_ctl17_JsChartViewerState"}
)["value"]
except TypeError:
raise ValueError("No exchange data is currently available for West Malaysia.")
hidden_values = {
"viewstategenerator": viewstategenerator,
"viewstate": viewstate,
"eventvalidation": eventvalidation,
"jschartviewerstate": jschartviewerstate,
}
return hidden_values
|
def extract_hidden_values(req):
"""
Gets current aspx page values to enable post requests to be sent.
Returns a dictionary.
"""
soup = BeautifulSoup(req.content, "html.parser")
# Find and define parameters needed to send a POST request.
viewstategenerator = soup.find("input", attrs={"id": "__VIEWSTATEGENERATOR"})[
"value"
]
viewstate = soup.find("input", attrs={"id": "__VIEWSTATE"})["value"]
eventvalidation = soup.find("input", attrs={"id": "__EVENTVALIDATION"})["value"]
jschartviewerstate = soup.find(
"input", attrs={"id": "MainContent_ctl17_JsChartViewerState"}
)["value"]
hidden_values = {
"viewstategenerator": viewstategenerator,
"viewstate": viewstate,
"eventvalidation": eventvalidation,
"jschartviewerstate": jschartviewerstate,
}
return hidden_values
|
https://github.com/tmrowco/electricitymap-contrib/issues/1473
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 130, in launch_parsers
**parser_kwargs)
File "/home/electricitymap/parsers/MY_WM.py", line 309, in fetch_exchange
hvdc_hidden_values = extract_hidden_values(hvdc_switch_req)
File "/home/electricitymap/parsers/MY_WM.py", line 176, in extract_hidden_values
viewstategenerator = soup.find("input", attrs={'id': '__VIEWSTATEGENERATOR'})['value']
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def data_processor(df, logger):
"""
Takes a dataframe and logging instance as input.
Checks for new generation types and logs awarning if any are found.
Parses the dataframe row by row removing unneeded keys.
Returns a list of 2 element tuples, each containing a datetime object
and production dictionary.
"""
# Remove leading whitespace in column headers.
df.columns = df.columns.str.strip()
keys_to_remove = {"GMT MKT Interval", "Average Actual Load", "Other", "Waste Heat"}
# Check for new generation columns.
known_keys = MAPPING.keys() | keys_to_remove
column_headers = set(df.columns)
unknown_keys = column_headers - known_keys
for heading in unknown_keys:
logger.warning(
"New column '{}' present in US-SPP data source.".format(heading),
extra={"key": "US-SPP"},
)
keys_to_remove = keys_to_remove | unknown_keys
processed_data = []
for index, row in df.iterrows():
production = row.to_dict()
extra_unknowns = sum([production[k] for k in unknown_keys])
production["unknown"] = (
production["Other"] + production["Waste Heat"] + extra_unknowns
)
dt_aware = parser.parse(production["GMT MKT Interval"])
for k in keys_to_remove:
production.pop(k, None)
mapped_production = {MAPPING.get(k, k): v for k, v in production.items()}
processed_data.append((dt_aware, mapped_production))
return processed_data
|
def data_processor(df, logger):
"""
Takes a dataframe and logging instance as input.
Checks for new generation types and logs awarning if any are found.
Parses the dataframe row by row removing unneeded keys.
Returns a list of 2 element tuples, each containing a datetime object
and production dictionary.
"""
# Remove leading whitespace in column headers.
df.columns = df.columns.str.strip()
keys_to_remove = {
"Coal Market",
"Coal Self",
"GMT MKT Interval",
"Average Actual Load",
"Other",
"Waste Heat",
}
# Check for new generation columns.
known_keys = MAPPING.keys() | keys_to_remove
column_headers = set(df.columns)
unknown_keys = column_headers - known_keys
for heading in unknown_keys:
logger.warning(
"New column '{}' present in US-SPP data source.".format(heading),
extra={"key": "US-SPP"},
)
keys_to_remove = keys_to_remove | unknown_keys
processed_data = []
for index, row in df.iterrows():
production = row.to_dict()
production["coal"] = production["Coal Market"] + production["Coal Self"]
extra_unknowns = sum([production[k] for k in unknown_keys])
production["unknown"] = (
production["Other"] + production["Waste Heat"] + extra_unknowns
)
dt_aware = parser.parse(production["GMT MKT Interval"])
for k in keys_to_remove:
production.pop(k, None)
mapped_production = {MAPPING.get(k, k): v for k, v in production.items()}
processed_data.append((dt_aware, mapped_production))
return processed_data
|
https://github.com/tmrowco/electricitymap-contrib/issues/1528
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 131, in launch_parsers
**parser_kwargs)
File "/home/contrib/parsers/US_SPP.py", line 122, in fetch_production
processed_data = data_processor(raw_data, logger)
File "/home/contrib/parsers/US_SPP.py", line 71, in data_processor
production['coal'] = production['Coal Market'] + production['Coal Self']
KeyError: 'Coal Market'
|
KeyError
|
def data_processer(raw_data, logger):
"""
Groups dictionaries by datetime key.
Removes unneeded keys and logs any new ones.
Returns a list of tuples containing (datetime object, dictionary).
"""
dt_key = lambda x: x["datetime"]
grouped = groupby(raw_data, dt_key)
keys_to_ignore = {"Load", "Net Purchases", "Inadvertent", "PURPA Other"}
known_keys = GENERATION_MAPPING.keys() | keys_to_ignore
unmapped = set()
parsed_data = []
for group in grouped:
dt = timestamp_converter(group[0])
generation = group[1]
production = {}
for gen_type in generation:
production[gen_type["name"]] = float(gen_type["data"])
current_keys = production.keys() | set()
unknown_keys = current_keys - known_keys
unmapped = unmapped | unknown_keys
keys_to_remove = keys_to_ignore | unknown_keys
for key in keys_to_remove:
production.pop(key, None)
production = {GENERATION_MAPPING[k]: v for k, v in production.items()}
parsed_data.append((dt, production))
for key in unmapped:
logger.warning(
"Key '{}' in US-IPC is not mapped to type.".format(key),
extra={"key": "US-IPC"},
)
return parsed_data
|
def data_processer(raw_data, logger):
"""
Groups dictionaries by datetime key.
Removes unneeded keys and logs any new ones.
Returns a list of tuples containing (datetime object, dictionary).
"""
dt_key = lambda x: x["datetime"]
grouped = groupby(raw_data, dt_key)
keys_to_ignore = {"Load", "Net Purchases", "Inadvertent", "PURPA Other"}
known_keys = GENERATION_MAPPING.keys() | keys_to_ignore
unmapped = set()
parsed_data = []
for group in grouped:
dt = timestamp_converter(group[0])
generation = group[1]
production = {}
for gen_type in generation:
production[gen_type["name"]] = float(gen_type["data"])
current_keys = production.keys() | set()
unknown_keys = current_keys - known_keys
unmapped = unmapped | unknown_keys
keys_to_remove = keys_to_ignore | unknown_keys
for key in keys_to_remove:
production.pop(key)
production = {GENERATION_MAPPING[k]: v for k, v in production.items()}
parsed_data.append((dt, production))
for key in unmapped:
logger.warning(
"Key '{}' in US-IPC is not mapped to type.".format(key),
extra={"key": "US-IPC"},
)
return parsed_data
|
https://github.com/tmrowco/electricitymap-contrib/issues/1478
|
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 130, in launch_parsers
**parser_kwargs)
File "/home/electricitymap/parsers/US_IPC.py", line 129, in fetch_production
processed_data = data_processer(raw_data, logger)
File "/home/electricitymap/parsers/US_IPC.py", line 83, in data_processer
production.pop(key)
KeyError: 'Net Purchases'
|
KeyError
|
def get_data(session=None):
"""Returns generation data as a list of floats."""
s = session or requests.Session()
# In order for the data url to return data, cookies from the display url must be obtained then reused.
response = s.get(display_url)
data_response = s.get(data_url)
raw_data = data_response.text
data = [float(i) for i in raw_data.split(",")]
return data
|
def get_data(session=None):
"""Returns generation data as a list of floats."""
s = session or requests.Session()
data_response = s.get(data_url)
raw_data = data_response.text
data = [float(i) for i in raw_data.split(",")]
return data
|
https://github.com/tmrowco/electricitymap-contrib/issues/1195
|
Traceback (most recent call last):
File "feeder_electricity.py", line 176, in fetch_exchange
objs = parser(country_code1, country_code2, session, logger=public_logger)
File "/home/electricitymap/parsers/MD.py", line 113, in fetch_exchange
exchange_status = get_data(session=session)
File "/home/electricitymap/parsers/MD.py", line 31, in get_data
data = [float(i) for i in raw_data.split(',')]
File "/home/electricitymap/parsers/MD.py", line 31, in <listcomp>
data = [float(i) for i in raw_data.split(',')]
ValueError: could not convert string to float:
|
ValueError
|
def fetch_SA_battery(session=None):
"""
Makes a request to the nemlog api for South Australia battery data.
Returns a float or None.
"""
today = arrow.now("Australia/Adelaide")
current = today.format("YYYYMMDD")
old = today.shift(days=-2).format("YYYYMMDD")
nemlog_url = "http://nemlog.com.au/api/unit/HPRL1/{}/{}/json".format(old, current)
s = session or requests.Session()
req = s.get(nemlog_url)
data = []
for line in req.iter_lines(decode_unicode=True):
data.append(line)
try:
latest = json.loads(data[-1])
except IndexError:
# No data available.
return None
state = float(latest["SCADAVALUE"])
# Source classifies charge/discharge opposite to EM.
battery_status = -1 * state
return battery_status
|
def fetch_SA_battery(session=None):
"""
Makes a request to the nemlog api for South Australia battery data.
Returns a float or None.
"""
today = arrow.now("Australia/Adelaide")
current = today.format("YYYYMMDD")
old = today.shift(days=-2).format("YYYYMMDD")
nemlog_url = "http://nemlog.com.au/api/unit/HPRL1/{}/{}/json".format(old, current)
s = session or requests.Session()
req = s.get(nemlog_url)
data = []
for line in req.iter_lines():
data.append(line)
try:
latest = json.loads(data[-1])
except IndexError:
# No data available.
return None
state = float(latest["SCADAVALUE"])
# Source classifies charge/discharge opposite to EM.
battery_status = -1 * state
return battery_status
|
https://github.com/tmrowco/electricitymap-contrib/issues/1150
|
fetch_production("AUS-SA") ->
Traceback (most recent call last):
File "AU.py", line 558, in <module>
print(fetch_production('AUS-SA'))
File "AU.py", line 422, in fetch_production
data['storage']['battery'] = AU_battery.fetch_SA_battery()
File "/home/chris/electricitymap/parsers/lib/AU_battery.py", line 30, in fetch_SA_battery
latest = json.loads(data[-1])
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
|
TypeError
|
def fetch_FI(session=None):
url = "http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview"
data = (session or requests).get(url).json()
countries = map(lambda x: x["value"], data["Headers"])
i = countries.index(COUNTRY_CODE)
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data["MeasuredAt"] / 1000).datetime, # time given in UTC
}
obj["consumption"] = {
"unknown": float(
data["ConsumptionData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
}
obj["exchange"] = {}
obj["production"] = {
"unknown": float(
data["ThermalData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
+ float(
data["NotSpecifiedData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"wind": float(
data["WindData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"nuclear": float(
data["NuclearData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"hydro": float(
data["HydroData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
}
return obj
|
def fetch_FI():
url = "http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview"
data = requests.get(url).json()
countries = map(lambda x: x["value"], data["Headers"])
i = countries.index(COUNTRY_CODE)
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data["MeasuredAt"] / 1000).datetime, # time given in UTC
}
obj["consumption"] = {
"unknown": float(
data["ConsumptionData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
}
obj["exchange"] = {}
obj["production"] = {
"unknown": float(
data["ThermalData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
+ float(
data["NotSpecifiedData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"wind": float(
data["WindData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"nuclear": float(
data["NuclearData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"hydro": float(
data["HydroData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
}
return obj
|
https://github.com/tmrowco/electricitymap-contrib/issues/95
|
Traceback (most recent call last):
File "feeder.py", line 51, in fetch_countries
obj = parser()
File "/home/parsers/PL.py", line 29, in fetch_PL
output_array = map(fetchValue, parameters)
File "/home/parsers/PL.py", line 21, in fetchValue
last = soup.find_all('point')[-1].find_all('quantity')[-1]
IndexError: list index out of range
|
IndexError
|
def fetch_GB(session=None):
url = "http://www.bmreports.com/bsp/additional/soapfunctions.php?element=generationbyfueltypetable"
response = (session or requests).get(url)
root = ET.fromstring(response.content)
data = root[0]
parsed = {}
for item in data:
parsed[item.get("TYPE")] = float(item.get("VAL"))
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data.get("AT")).datetime, # Time given in UTC
}
obj["consumption"] = {}
obj["production"] = {
"coal": parsed["COAL"],
"gas": parsed["CCGT"] + parsed["OCGT"],
"nuclear": parsed["NUCLEAR"],
"wind": parsed["WIND"],
"oil": parsed["OIL"],
"hydro": parsed["PS"] + parsed["NPSHYD"],
"unknown": parsed["OTHER"],
}
obj["exchange"] = {
"FR": parsed["INTFR"],
"IE": parsed["INTIRL"],
"NL": parsed["INTNED"],
}
total_production = 0
for value in obj["production"].values():
total_production += value
obj["co2"] = (
parsed["CCGT"] / total_production * 360
+ parsed["OCGT"] / total_production * 480
+ parsed["COAL"] / total_production * 910
+ parsed["OTHER"] / total_production * 300
+ parsed["OIL"] / total_production * 610
+ parsed["INTFR"] / total_production * 90
+ parsed["INTIRL"] / total_production * 450
+ parsed["INTNED"] / total_production * 550
+ parsed["INTEW"] / total_production * 450
) / 0.93
return obj
|
def fetch_GB():
url = "http://www.bmreports.com/bsp/additional/soapfunctions.php?element=generationbyfueltypetable"
response = requests.get(url)
root = ET.fromstring(response.content)
data = root[0]
parsed = {}
for item in data:
parsed[item.get("TYPE")] = float(item.get("VAL"))
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data.get("AT")).datetime, # Time given in UTC
}
obj["consumption"] = {}
obj["production"] = {
"coal": parsed["COAL"],
"gas": parsed["CCGT"] + parsed["OCGT"],
"nuclear": parsed["NUCLEAR"],
"wind": parsed["WIND"],
"oil": parsed["OIL"],
"hydro": parsed["PS"] + parsed["NPSHYD"],
"unknown": parsed["OTHER"],
}
obj["exchange"] = {
"FR": parsed["INTFR"],
"IE": parsed["INTIRL"],
"NL": parsed["INTNED"],
}
total_production = 0
for value in obj["production"].values():
total_production += value
obj["co2"] = (
parsed["CCGT"] / total_production * 360
+ parsed["OCGT"] / total_production * 480
+ parsed["COAL"] / total_production * 910
+ parsed["OTHER"] / total_production * 300
+ parsed["OIL"] / total_production * 610
+ parsed["INTFR"] / total_production * 90
+ parsed["INTIRL"] / total_production * 450
+ parsed["INTNED"] / total_production * 550
+ parsed["INTEW"] / total_production * 450
) / 0.93
return obj
|
https://github.com/tmrowco/electricitymap-contrib/issues/95
|
Traceback (most recent call last):
File "feeder.py", line 51, in fetch_countries
obj = parser()
File "/home/parsers/PL.py", line 29, in fetch_PL
output_array = map(fetchValue, parameters)
File "/home/parsers/PL.py", line 21, in fetchValue
last = soup.find_all('point')[-1].find_all('quantity')[-1]
IndexError: list index out of range
|
IndexError
|
def fetch_LV(session=None):
url = "http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview"
data = (session or requests).get(url).json()
countries = map(lambda x: x["value"], data["Headers"])
i = countries.index(COUNTRY_CODE)
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data["MeasuredAt"] / 1000).datetime, # time given in UTC
}
obj["consumption"] = {
"unknown": float(
data["ConsumptionData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
}
obj["exchange"] = {}
obj["production"] = {
"unknown": float(
data["ThermalData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
+ float(
data["NotSpecifiedData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"wind": float(
data["WindData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"nuclear": float(
data["NuclearData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"hydro": float(
data["HydroData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
}
return obj
|
def fetch_LV():
url = "http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview"
data = requests.get(url).json()
countries = map(lambda x: x["value"], data["Headers"])
i = countries.index(COUNTRY_CODE)
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data["MeasuredAt"] / 1000).datetime, # time given in UTC
}
obj["consumption"] = {
"unknown": float(
data["ConsumptionData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
}
obj["exchange"] = {}
obj["production"] = {
"unknown": float(
data["ThermalData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
+ float(
data["NotSpecifiedData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"wind": float(
data["WindData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"nuclear": float(
data["NuclearData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"hydro": float(
data["HydroData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
}
return obj
|
https://github.com/tmrowco/electricitymap-contrib/issues/95
|
Traceback (most recent call last):
File "feeder.py", line 51, in fetch_countries
obj = parser()
File "/home/parsers/PL.py", line 29, in fetch_PL
output_array = map(fetchValue, parameters)
File "/home/parsers/PL.py", line 21, in fetchValue
last = soup.find_all('point')[-1].find_all('quantity')[-1]
IndexError: list index out of range
|
IndexError
|
def fetch_NO(session=None):
url = "http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview"
data = (session or requests).get(url).json()
countries = map(lambda x: x["value"], data["Headers"])
i = countries.index(COUNTRY_CODE)
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data["MeasuredAt"] / 1000).datetime, # time given in UTC
}
obj["consumption"] = {
"unknown": float(
data["ConsumptionData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
}
obj["exchange"] = {}
obj["production"] = {
"solar": 0,
"unknown": float(
data["ThermalData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
+ float(
data["NotSpecifiedData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"wind": float(
data["WindData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"nuclear": float(
data["NuclearData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"hydro": float(
data["HydroData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
}
return obj
|
def fetch_NO():
url = "http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview"
data = requests.get(url).json()
countries = map(lambda x: x["value"], data["Headers"])
i = countries.index(COUNTRY_CODE)
obj = {
"countryCode": COUNTRY_CODE,
"datetime": arrow.get(data["MeasuredAt"] / 1000).datetime, # time given in UTC
}
obj["consumption"] = {
"unknown": float(
data["ConsumptionData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
}
obj["exchange"] = {}
obj["production"] = {
"solar": 0,
"unknown": float(
data["ThermalData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
)
+ float(
data["NotSpecifiedData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"wind": float(
data["WindData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"nuclear": float(
data["NuclearData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
"hydro": float(
data["HydroData"][i]["value"]
.replace("\xa0", "")
.replace(" ", "")
.replace("-", "0")
),
}
return obj
|
https://github.com/tmrowco/electricitymap-contrib/issues/95
|
Traceback (most recent call last):
File "feeder.py", line 51, in fetch_countries
obj = parser()
File "/home/parsers/PL.py", line 29, in fetch_PL
output_array = map(fetchValue, parameters)
File "/home/parsers/PL.py", line 21, in fetchValue
last = soup.find_all('point')[-1].find_all('quantity')[-1]
IndexError: list index out of range
|
IndexError
|
def fetch_PL(session=None):
return fetch_ENTSOE(ENTSOE_DOMAIN, COUNTRY_CODE, session)
|
def fetch_PL():
parameters = ["B01", "B02", "B03", "B04", "B05", "B06", "B10", "B11", "B12", "B19"]
output_array = map(fetchValue, parameters)
data = {
"countryCode": COUNTRY_CODE,
"production": {
"wind": output_array[9],
"solar": 0,
"hydro": output_array[6] + output_array[7] + output_array[8],
"biomass": output_array[0],
"nuclear": 0,
"gas": output_array[2] + output_array[3],
"coal": output_array[1] + output_array[4],
"oil": output_array[5],
"unknown": 0,
},
}
return data
|
https://github.com/tmrowco/electricitymap-contrib/issues/95
|
Traceback (most recent call last):
File "feeder.py", line 51, in fetch_countries
obj = parser()
File "/home/parsers/PL.py", line 29, in fetch_PL
output_array = map(fetchValue, parameters)
File "/home/parsers/PL.py", line 21, in fetchValue
last = soup.find_all('point')[-1].find_all('quantity')[-1]
IndexError: list index out of range
|
IndexError
|
def visit_for(self, node):
"""Emit a convention whenever range and len are used for indexing."""
# Verify that we have a `range([start], len(...), [stop])` call and
# that the object which is iterated is used as a subscript in the
# body of the for.
# Is it a proper range call?
if not isinstance(node.iter, astroid.Call):
return
if not self._is_builtin(node.iter.func, "range"):
return
if not node.iter.args:
return
is_constant_zero = (
isinstance(node.iter.args[0], astroid.Const) and node.iter.args[0].value == 0
)
if len(node.iter.args) == 2 and not is_constant_zero:
return
if len(node.iter.args) > 2:
return
# Is it a proper len call?
if not isinstance(node.iter.args[-1], astroid.Call):
return
second_func = node.iter.args[-1].func
if not self._is_builtin(second_func, "len"):
return
len_args = node.iter.args[-1].args
if not len_args or len(len_args) != 1:
return
iterating_object = len_args[0]
if not isinstance(iterating_object, astroid.Name):
return
# If we're defining __iter__ on self, enumerate won't work
scope = node.scope()
if iterating_object.name == "self" and scope.name == "__iter__":
return
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
for child in node.body:
for subscript in child.nodes_of_class(astroid.Subscript):
if not isinstance(subscript.value, astroid.Name):
continue
value = subscript.slice
if isinstance(value, astroid.Index):
value = value.value
if not isinstance(value, astroid.Name):
continue
if value.name != node.target.name:
continue
if iterating_object.name != subscript.value.name:
continue
if subscript.value.scope() != node.scope():
# Ignore this subscript if it's not in the same
# scope. This means that in the body of the for
# loop, another scope was created, where the same
# name for the iterating object was used.
continue
self.add_message("consider-using-enumerate", node=node)
return
|
def visit_for(self, node):
"""Emit a convention whenever range and len are used for indexing."""
# Verify that we have a `range([start], len(...), [stop])` call and
# that the object which is iterated is used as a subscript in the
# body of the for.
# Is it a proper range call?
if not isinstance(node.iter, astroid.Call):
return
if not self._is_builtin(node.iter.func, "range"):
return
is_constant_zero = (
isinstance(node.iter.args[0], astroid.Const) and node.iter.args[0].value == 0
)
if len(node.iter.args) == 2 and not is_constant_zero:
return
if len(node.iter.args) > 2:
return
# Is it a proper len call?
if not isinstance(node.iter.args[-1], astroid.Call):
return
second_func = node.iter.args[-1].func
if not self._is_builtin(second_func, "len"):
return
len_args = node.iter.args[-1].args
if not len_args or len(len_args) != 1:
return
iterating_object = len_args[0]
if not isinstance(iterating_object, astroid.Name):
return
# If we're defining __iter__ on self, enumerate won't work
scope = node.scope()
if iterating_object.name == "self" and scope.name == "__iter__":
return
# Verify that the body of the for loop uses a subscript
# with the object that was iterated. This uses some heuristics
# in order to make sure that the same object is used in the
# for body.
for child in node.body:
for subscript in child.nodes_of_class(astroid.Subscript):
if not isinstance(subscript.value, astroid.Name):
continue
value = subscript.slice
if isinstance(value, astroid.Index):
value = value.value
if not isinstance(value, astroid.Name):
continue
if value.name != node.target.name:
continue
if iterating_object.name != subscript.value.name:
continue
if subscript.value.scope() != node.scope():
# Ignore this subscript if it's not in the same
# scope. This means that in the body of the for
# loop, another scope was created, where the same
# name for the iterating object was used.
continue
self.add_message("consider-using-enumerate", node=node)
return
|
https://github.com/PyCQA/pylint/issues/3735
|
************* Module alectryon.test
test:1:0: C0114: Missing module docstring (missing-module-docstring)
Traceback (most recent call last):
File "/home/clement/.local/bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/home/clement/.local/lib/python3.8/site-packages/pylint/__init__.py", line 22, in run_pylint
PylintRun(sys.argv[1:])
File "/home/clement/.local/lib/python3.8/site-packages/pylint/lint/run.py", line 344, in __init__
linter.check(args)
File "/home/clement/.local/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 864, in check
self._check_files(
File "/home/clement/.local/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 904, in _check_files
self._check_file(get_ast, check_astroid_module, name, filepath, modname)
File "/home/clement/.local/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 930, in _check_file
check_astroid_module(ast_node)
File "/home/clement/.local/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 1062, in check_astroid_module
retval = self._check_astroid_module(
File "/home/clement/.local/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 1107, in _check_astroid_module
walker.walk(ast_node)
File "/home/clement/.local/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 75, in walk
self.walk(child)
File "/home/clement/.local/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 72, in walk
callback(astroid)
File "/home/clement/.local/lib/python3.8/site-packages/pylint/checkers/refactoring.py", line 1346, in visit_for
if not isinstance(node.iter.args[-1], astroid.Call):
IndexError: list index out of range
|
IndexError
|
def is_default_argument(
node: astroid.node_classes.NodeNG,
scope: Optional[astroid.node_classes.NodeNG] = None,
) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
if not scope:
scope = node.scope()
if isinstance(scope, (astroid.FunctionDef, astroid.Lambda)):
for default_node in scope.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
|
def is_default_argument(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
parent = node.scope()
if isinstance(parent, (astroid.FunctionDef, astroid.Lambda)):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
|
https://github.com/PyCQA/pylint/issues/3461
|
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
class Wrong:
File "tmp.py", line 14, in Wrong
def work(self) -> self.Result2:
NameError: name 'self' is not defined
|
NameError
|
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
self.node = node
|
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
|
https://github.com/PyCQA/pylint/issues/3461
|
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
class Wrong:
File "tmp.py", line 14, in Wrong
def work(self) -> self.Result2:
NameError: name 'self' is not defined
|
NameError
|
def visit_name(self, node):
"""Check that a name is defined in the current scope"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
name = node.name
frame = stmt.scope()
start_index = len(self._to_consume) - 1
undefined_variable_is_enabled = self.linter.is_message_enabled("undefined-variable")
used_before_assignment_is_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
# pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# The list of base classes in the class definition is not part
# of the class body.
# If the current scope is a class scope but it's not the inner
# scope, ignore it. This prevents to access this scope instead of
# the globals one in function members when there are some common
# names.
if current_consumer.scope_type == "class" and (
utils.is_ancestor_name(current_consumer.node, node)
or (i != start_index and self._ignore_class_scope(node))
):
continue
# if the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
if (
current_consumer.scope_type == "function"
and self._defined_in_function_definition(node, current_consumer.node)
):
# ignore function scope if is an annotation/default/decorator, as not in the body
continue
if current_consumer.scope_type == "lambda" and utils.is_default_argument(
node, current_consumer.node
):
continue
# the name has already been consumed, only check it's not a loop
# variable used outside the loop
# avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if name in current_consumer.consumed and not (
current_consumer.scope_type == "comprehension"
and self._has_homonym_in_upper_function_scope(node, i)
):
defnode = utils.assign_parent(current_consumer.consumed[name][0])
self._check_late_binding_closure(node, defnode)
self._loopvar_name(node, name)
break
found_node = current_consumer.get_next_to_consume(node)
if found_node is None:
continue
# checks for use before assignment
defnode = utils.assign_parent(current_consumer.to_consume[name][0])
if (
undefined_variable_is_enabled or used_before_assignment_is_enabled
) and defnode is not None:
self._check_late_binding_closure(node, defnode)
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
recursive_klass = (
frame is defframe
and defframe.parent_of(node)
and isinstance(defframe, astroid.ClassDef)
and node.name == defframe.name
)
if (
recursive_klass
and utils.is_inside_lambda(node)
and (
not utils.is_default_argument(node)
or node.scope().parent.scope() is not defframe
)
):
# Self-referential class references are fine in lambda's --
# As long as they are not part of the default argument directly
# under the scope of the parent self-referring class.
# Example of valid default argument:
# class MyName3:
# myattr = 1
# mylambda3 = lambda: lambda a=MyName3: a
# Example of invalid default argument:
# class MyName4:
# myattr = 1
# mylambda4 = lambda a=MyName4: lambda: a
# If the above conditional is True,
# there is no possibility of undefined-variable
# Also do not consume class name
# (since consuming blocks subsequent checks)
# -- quit
break
(
maybee0601,
annotation_return,
use_outer_definition,
) = self._is_variable_violation(
node,
name,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
recursive_klass,
)
if use_outer_definition:
continue
if (
maybee0601
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = defstmt is stmt and isinstance(
node, (astroid.DelName, astroid.AssignName)
)
if (
recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, astroid.Delete)
):
if not utils.node_ignores_exception(node, NameError):
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt,
(
astroid.AnnAssign,
astroid.FunctionDef,
astroid.Arguments,
),
)
and name in node.root().locals
):
self.add_message("undefined-variable", args=name, node=node)
elif base_scope_type != "lambda":
# E0601 may *not* occurs in lambda scope.
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(stmt, (astroid.AnnAssign, astroid.FunctionDef))
):
self.add_message("used-before-assignment", args=name, node=node)
elif base_scope_type == "lambda":
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, astroid.ClassDef) and name in frame.locals:
if isinstance(node.parent, astroid.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message(
"used-before-assignment", args=name, node=node
)
else:
self.add_message("undefined-variable", args=name, node=node)
elif current_consumer.scope_type == "lambda":
self.add_message("undefined-variable", node=node, args=name)
current_consumer.mark_as_consumed(name, found_node)
# check it's not a loop variable used outside the loop
self._loopvar_name(node, name)
break
else:
# we have not found the name, if it isn't a builtin, that's an
# undefined name !
if undefined_variable_is_enabled and not (
name in astroid.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
or (
name == "__class__"
and isinstance(frame, astroid.FunctionDef)
and frame.is_method()
)
):
if not utils.node_ignores_exception(node, NameError):
self.add_message("undefined-variable", args=name, node=node)
|
def visit_name(self, node):
"""Check that a name is defined in the current scope"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from an astroid built from live code, skip
assert not stmt.root().file.endswith(".py")
return
name = node.name
frame = stmt.scope()
# if the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
if (
utils.is_default_argument(node)
or utils.is_func_decorator(node)
or utils.is_ancestor_name(frame, node)
):
start_index = len(self._to_consume) - 2
else:
start_index = len(self._to_consume) - 1
undefined_variable_is_enabled = self.linter.is_message_enabled("undefined-variable")
used_before_assignment_is_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
# pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# if the current scope is a class scope but it's not the inner
# scope, ignore it. This prevents to access this scope instead of
# the globals one in function members when there are some common
# names.
if current_consumer.scope_type == "class" and i != start_index:
# The only exceptions are: when the variable forms an iter within a
# comprehension scope; and/or when used as a default, decorator,
# or annotation within a function.
if self._ignore_class_scope(node):
continue
# the name has already been consumed, only check it's not a loop
# variable used outside the loop
# avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if name in current_consumer.consumed and not (
current_consumer.scope_type == "comprehension"
and self._has_homonym_in_upper_function_scope(node, i)
):
defnode = utils.assign_parent(current_consumer.consumed[name][0])
self._check_late_binding_closure(node, defnode)
self._loopvar_name(node, name)
break
found_node = current_consumer.get_next_to_consume(node)
if found_node is None:
continue
# checks for use before assignment
defnode = utils.assign_parent(current_consumer.to_consume[name][0])
if (
undefined_variable_is_enabled or used_before_assignment_is_enabled
) and defnode is not None:
self._check_late_binding_closure(node, defnode)
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
recursive_klass = (
frame is defframe
and defframe.parent_of(node)
and isinstance(defframe, astroid.ClassDef)
and node.name == defframe.name
)
if (
recursive_klass
and utils.is_inside_lambda(node)
and (
not utils.is_default_argument(node)
or node.scope().parent.scope() is not defframe
)
):
# Self-referential class references are fine in lambda's --
# As long as they are not part of the default argument directly
# under the scope of the parent self-referring class.
# Example of valid default argument:
# class MyName3:
# myattr = 1
# mylambda3 = lambda: lambda a=MyName3: a
# Example of invalid default argument:
# class MyName4:
# myattr = 1
# mylambda4 = lambda a=MyName4: lambda: a
# If the above conditional is True,
# there is no possibility of undefined-variable
# Also do not consume class name
# (since consuming blocks subsequent checks)
# -- quit
break
(
maybee0601,
annotation_return,
use_outer_definition,
) = self._is_variable_violation(
node,
name,
defnode,
stmt,
defstmt,
frame,
defframe,
base_scope_type,
recursive_klass,
)
if use_outer_definition:
continue
if (
maybee0601
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = defstmt is stmt and isinstance(
node, (astroid.DelName, astroid.AssignName)
)
if (
recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, astroid.Delete)
):
if not utils.node_ignores_exception(node, NameError):
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(
stmt,
(
astroid.AnnAssign,
astroid.FunctionDef,
astroid.Arguments,
),
)
and name in node.root().locals
):
self.add_message("undefined-variable", args=name, node=node)
elif base_scope_type != "lambda":
# E0601 may *not* occurs in lambda scope.
# Handle postponed evaluation of annotations
if not (
self._postponed_evaluation_enabled
and isinstance(stmt, (astroid.AnnAssign, astroid.FunctionDef))
):
self.add_message("used-before-assignment", args=name, node=node)
elif base_scope_type == "lambda":
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, astroid.ClassDef) and name in frame.locals:
if isinstance(node.parent, astroid.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message(
"used-before-assignment", args=name, node=node
)
else:
self.add_message("undefined-variable", args=name, node=node)
elif current_consumer.scope_type == "lambda":
self.add_message("undefined-variable", node=node, args=name)
current_consumer.mark_as_consumed(name, found_node)
# check it's not a loop variable used outside the loop
self._loopvar_name(node, name)
break
else:
# we have not found the name, if it isn't a builtin, that's an
# undefined name !
if undefined_variable_is_enabled and not (
name in astroid.Module.scope_attrs
or utils.is_builtin(name)
or name in self.config.additional_builtins
or (
name == "__class__"
and isinstance(frame, astroid.FunctionDef)
and frame.is_method()
)
):
if not utils.node_ignores_exception(node, NameError):
self.add_message("undefined-variable", args=name, node=node)
|
https://github.com/PyCQA/pylint/issues/3461
|
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
class Wrong:
File "tmp.py", line 14, in Wrong
def work(self) -> self.Result2:
NameError: name 'self' is not defined
|
NameError
|
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
# class C:
# class Tp:
# pass
# class D(Tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
in_ancestor_list = utils.is_ancestor_name(frame, node)
if in_annotation_or_default_or_decorator or in_ancestor_list:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(isinstance(frame, astroid.ClassDef) or in_annotation_or_default_or_decorator)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
|
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default_or_decorator = self._defined_in_function_definition(
node, frame
)
if in_annotation_or_default_or_decorator:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not (
(isinstance(frame, astroid.ClassDef) or in_annotation_or_default_or_decorator)
and not self._in_lambda_or_comprehension_body(node, frame)
and name in frame_locals
)
|
https://github.com/PyCQA/pylint/issues/3461
|
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
class Wrong:
File "tmp.py", line 14, in Wrong
def work(self) -> self.Result2:
NameError: name 'self' is not defined
|
NameError
|
def _worker_check_single_file(file_item):
name, filepath, modname = file_item
_worker_linter.open()
_worker_linter.check_single_file(name, filepath, modname)
msgs = [_get_new_args(m) for m in _worker_linter.reporter.messages]
return (
_worker_linter.current_name,
filepath,
_worker_linter.file_state.base_name,
msgs,
_worker_linter.stats,
_worker_linter.msg_status,
)
|
def _worker_check_single_file(file_item):
name, filepath, modname = file_item
_worker_linter.open()
_worker_linter.check_single_file(name, filepath, modname)
msgs = [_get_new_args(m) for m in _worker_linter.reporter.messages]
return (
_worker_linter.current_name,
_worker_linter.file_state.base_name,
msgs,
_worker_linter.stats,
_worker_linter.msg_status,
)
|
https://github.com/PyCQA/pylint/issues/3564
|
Traceback (most recent call last):
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/__init__.py", line 22, in run_pylint
PylintRun(sys.argv[1:])
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/lint/run.py", line 338, in __init__
linter.check(args)
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 878, in check
files_or_modules,
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/lint/check_parallel.py", line 106, in check_parallel
linter.reporter.handle_message(msg)
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/reporters/text.py", line 151, in handle_message
self.write_message(msg)
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/reporters/text.py", line 141, in write_message
self.writeln(msg.format(self._template))
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/message/message.py", line 51, in format
return template.format(**dict(zip(self._fields, self)))
AttributeError: 'NoneType' object has no attribute 'format'
ERROR: InvocationError for command /opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/bin/pylint src/ (exited with code 1)
|
AttributeError
|
def check_parallel(linter, jobs, files, arguments=None):
"""Use the given linter to lint the files with given amount of workers (jobs)"""
# The reporter does not need to be passed to worker processess, i.e. the reporter does
# not need to be pickleable
original_reporter = linter.reporter
linter.reporter = None
# The linter is inherited by all the pool's workers, i.e. the linter
# is identical to the linter object here. This is requred so that
# a custom PyLinter object can be used.
initializer = functools.partial(_worker_initialize, arguments=arguments)
with multiprocessing.Pool(jobs, initializer=initializer, initargs=[linter]) as pool:
# ..and now when the workers have inherited the linter, the actual reporter
# can be set back here on the parent process so that results get stored into
# correct reporter
linter.set_reporter(original_reporter)
linter.open()
all_stats = []
for (
module,
file_path,
base_name,
messages,
stats,
msg_status,
) in pool.imap_unordered(_worker_check_single_file, files):
linter.file_state.base_name = base_name
linter.set_current_module(module, file_path)
for msg in messages:
msg = Message(*msg)
linter.reporter.handle_message(msg)
all_stats.append(stats)
linter.msg_status |= msg_status
linter.stats = _merge_stats(all_stats)
# Insert stats data to local checkers.
for checker in linter.get_checkers():
if checker is not linter:
checker.stats = linter.stats
|
def check_parallel(linter, jobs, files, arguments=None):
"""Use the given linter to lint the files with given amount of workers (jobs)"""
# The reporter does not need to be passed to worker processess, i.e. the reporter does
# not need to be pickleable
original_reporter = linter.reporter
linter.reporter = None
# The linter is inherited by all the pool's workers, i.e. the linter
# is identical to the linter object here. This is requred so that
# a custom PyLinter object can be used.
initializer = functools.partial(_worker_initialize, arguments=arguments)
with multiprocessing.Pool(jobs, initializer=initializer, initargs=[linter]) as pool:
# ..and now when the workers have inherited the linter, the actual reporter
# can be set back here on the parent process so that results get stored into
# correct reporter
linter.set_reporter(original_reporter)
linter.open()
all_stats = []
for module, base_name, messages, stats, msg_status in pool.imap_unordered(
_worker_check_single_file, files
):
linter.file_state.base_name = base_name
linter.set_current_module(module)
for msg in messages:
msg = Message(*msg)
linter.reporter.handle_message(msg)
all_stats.append(stats)
linter.msg_status |= msg_status
linter.stats = _merge_stats(all_stats)
# Insert stats data to local checkers.
for checker in linter.get_checkers():
if checker is not linter:
checker.stats = linter.stats
|
https://github.com/PyCQA/pylint/issues/3564
|
Traceback (most recent call last):
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/__init__.py", line 22, in run_pylint
PylintRun(sys.argv[1:])
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/lint/run.py", line 338, in __init__
linter.check(args)
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 878, in check
files_or_modules,
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/lint/check_parallel.py", line 106, in check_parallel
linter.reporter.handle_message(msg)
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/reporters/text.py", line 151, in handle_message
self.write_message(msg)
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/reporters/text.py", line 141, in write_message
self.writeln(msg.format(self._template))
File "/opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/lib/python3.6/site-packages/pylint/message/message.py", line 51, in format
return template.format(**dict(zip(self._fields, self)))
AttributeError: 'NoneType' object has no attribute 'format'
ERROR: InvocationError for command /opt/gitlab-runner/NCiG8AGt/0/<path>/.tox/pylint/bin/pylint src/ (exited with code 1)
|
AttributeError
|
def _check_late_binding_closure(self, node, assignment_node):
if not self.linter.is_message_enabled("cell-var-from-loop"):
return
def _is_direct_lambda_call():
return (
isinstance(node_scope.parent, astroid.Call)
and node_scope.parent.func is node_scope
)
node_scope = node.scope()
if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
return
if isinstance(node.parent, astroid.Arguments):
return
if isinstance(assignment_node, astroid.Comprehension):
if assignment_node.parent.parent_of(node.scope()):
self.add_message("cell-var-from-loop", node=node, args=node.name)
else:
assign_scope = assignment_node.scope()
maybe_for = assignment_node
while maybe_for and not isinstance(maybe_for, astroid.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (
maybe_for
and maybe_for.parent_of(node_scope)
and not _is_direct_lambda_call()
and not isinstance(node_scope.statement(), astroid.Return)
):
self.add_message("cell-var-from-loop", node=node, args=node.name)
|
def _check_late_binding_closure(self, node, assignment_node):
if not self.linter.is_message_enabled("cell-var-from-loop"):
return
def _is_direct_lambda_call():
return (
isinstance(node_scope.parent, astroid.Call)
and node_scope.parent.func is node_scope
)
node_scope = node.scope()
if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
return
if isinstance(node.parent, astroid.Arguments):
return
if isinstance(assignment_node, astroid.Comprehension):
if assignment_node.parent.parent_of(node.scope()):
self.add_message("cell-var-from-loop", node=node, args=node.name)
else:
assign_scope = assignment_node.scope()
maybe_for = assignment_node
while not isinstance(maybe_for, astroid.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (
maybe_for.parent_of(node_scope)
and not _is_direct_lambda_call()
and not isinstance(node_scope.statement(), astroid.Return)
):
self.add_message("cell-var-from-loop", node=node, args=node.name)
|
https://github.com/PyCQA/pylint/issues/3646
|
Traceback (most recent call last):
File "/Users/user/.local/share/virtualenvs/my_project/bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/__init__.py", line 22, in run_pylint
PylintRun(sys.argv[1:])
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/lint/run.py", line 344, in __init__
linter.check(args)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 871, in check
self.get_ast, self._iterate_file_descrs(files_or_modules)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 904, in _check_files
self._check_file(get_ast, check_astroid_module, name, filepath, modname)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 930, in _check_file
check_astroid_module(ast_node)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 1063, in check_astroid_module
ast_node, walker, rawcheckers, tokencheckers
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/lint/pylinter.py", line 1107, in _check_astroid_module
walker.walk(ast_node)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/utils/ast_walker.py", line 75, in walk
self.walk(child)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/utils/ast_walker.py", line 75, in walk
self.walk(child)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/utils/ast_walker.py", line 75, in walk
self.walk(child)
[Previous line repeated 1 more times]
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/utils/ast_walker.py", line 72, in walk
callback(astroid)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/checkers/variables.py", line 1019, in visit_name
self._check_late_binding_closure(node, defnode)
File "/Users/user/.local/share/virtualenvs/my_project/lib/python3.6/site-packages/pylint/checkers/variables.py", line 1722, in _check_late_binding_closure
maybe_for = maybe_for.parent
AttributeError: 'NoneType' object has no attribute 'parent'
|
AttributeError
|
def run_pylint():
"""run pylint"""
from pylint.lint import Run as PylintRun
try:
PylintRun(sys.argv[1:])
except KeyboardInterrupt:
sys.exit(1)
|
def run_pylint():
"""run pylint"""
try:
PylintRun(sys.argv[1:])
except KeyboardInterrupt:
sys.exit(1)
|
https://github.com/PyCQA/pylint/issues/3386
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 125, in _main
prepare(preparation_data)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 263, in run_path
return _run_module_code(code, init_globals, run_name,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/Users/jeppe/.bin/pylint", line 5, in <module>
from pylint import run_pylint
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 13, in <module>
from pylint.checkers.similar import Run as SimilarRun
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/__init__.py", line 42, in <module>
from pylint.checkers.base_checker import BaseChecker, BaseTokenChecker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/base_checker.py", line 17, in <module>
from pylint.config import OptionsProviderMixIn
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/config.py", line 49, in <module>
from pylint import utils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/__init__.py", line 44, in <module>
from pylint.utils.ast_walker import ASTWalker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 8, in <module>
from astroid import nodes
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/__init__.py", line 63, in <module>
from astroid.nodes import *
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/nodes.py", line 23, in <module>
from astroid.node_classes import (
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/node_classes.py", line 38, in <module>
from astroid import bases
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/bases.py", line 32, in <module>
MANAGER = manager.AstroidManager()
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/util.py", line 26, in <lambda>
lambda: importlib.import_module("." + module_name, "astroid")
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/manager.py", line 25, in <module>
from astroid import modutils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/modutils.py", line 32, in <module>
import platform
File "/Users/jeppe/platform.py", line 1, in <module>
import non_existing_module
ModuleNotFoundError: No module named 'non_existing_module'
Traceback (most recent call last):
File "/Users/jeppe/.bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 23, in run_pylint
PylintRun(sys.argv[1:])
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1731, in __init__
linter.check(args)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1006, in check
self._parallel_check(files_or_modules)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1080, in _parallel_check
for result in self._parallel_task(files_or_modules):
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1028, in _parallel_task
manager = multiprocessing.Manager()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/context.py", line 57, in Manager
m.start()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/managers.py", line 583, in start
self._address = reader.recv()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 383, in _recv
raise EOFError
EOFError
|
ModuleNotFoundError
|
def run_epylint():
"""run pylint"""
from pylint.epylint import Run as EpylintRun
EpylintRun()
|
def run_epylint():
"""run pylint"""
EpylintRun()
|
https://github.com/PyCQA/pylint/issues/3386
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 125, in _main
prepare(preparation_data)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 263, in run_path
return _run_module_code(code, init_globals, run_name,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/Users/jeppe/.bin/pylint", line 5, in <module>
from pylint import run_pylint
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 13, in <module>
from pylint.checkers.similar import Run as SimilarRun
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/__init__.py", line 42, in <module>
from pylint.checkers.base_checker import BaseChecker, BaseTokenChecker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/base_checker.py", line 17, in <module>
from pylint.config import OptionsProviderMixIn
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/config.py", line 49, in <module>
from pylint import utils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/__init__.py", line 44, in <module>
from pylint.utils.ast_walker import ASTWalker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 8, in <module>
from astroid import nodes
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/__init__.py", line 63, in <module>
from astroid.nodes import *
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/nodes.py", line 23, in <module>
from astroid.node_classes import (
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/node_classes.py", line 38, in <module>
from astroid import bases
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/bases.py", line 32, in <module>
MANAGER = manager.AstroidManager()
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/util.py", line 26, in <lambda>
lambda: importlib.import_module("." + module_name, "astroid")
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/manager.py", line 25, in <module>
from astroid import modutils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/modutils.py", line 32, in <module>
import platform
File "/Users/jeppe/platform.py", line 1, in <module>
import non_existing_module
ModuleNotFoundError: No module named 'non_existing_module'
Traceback (most recent call last):
File "/Users/jeppe/.bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 23, in run_pylint
PylintRun(sys.argv[1:])
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1731, in __init__
linter.check(args)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1006, in check
self._parallel_check(files_or_modules)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1080, in _parallel_check
for result in self._parallel_task(files_or_modules):
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1028, in _parallel_task
manager = multiprocessing.Manager()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/context.py", line 57, in Manager
m.start()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/managers.py", line 583, in start
self._address = reader.recv()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 383, in _recv
raise EOFError
EOFError
|
ModuleNotFoundError
|
def run_pyreverse():
"""run pyreverse"""
from pylint.pyreverse.main import Run as PyreverseRun
PyreverseRun(sys.argv[1:])
|
def run_pyreverse():
"""run pyreverse"""
PyreverseRun(sys.argv[1:])
|
https://github.com/PyCQA/pylint/issues/3386
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 125, in _main
prepare(preparation_data)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 263, in run_path
return _run_module_code(code, init_globals, run_name,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/Users/jeppe/.bin/pylint", line 5, in <module>
from pylint import run_pylint
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 13, in <module>
from pylint.checkers.similar import Run as SimilarRun
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/__init__.py", line 42, in <module>
from pylint.checkers.base_checker import BaseChecker, BaseTokenChecker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/base_checker.py", line 17, in <module>
from pylint.config import OptionsProviderMixIn
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/config.py", line 49, in <module>
from pylint import utils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/__init__.py", line 44, in <module>
from pylint.utils.ast_walker import ASTWalker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 8, in <module>
from astroid import nodes
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/__init__.py", line 63, in <module>
from astroid.nodes import *
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/nodes.py", line 23, in <module>
from astroid.node_classes import (
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/node_classes.py", line 38, in <module>
from astroid import bases
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/bases.py", line 32, in <module>
MANAGER = manager.AstroidManager()
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/util.py", line 26, in <lambda>
lambda: importlib.import_module("." + module_name, "astroid")
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/manager.py", line 25, in <module>
from astroid import modutils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/modutils.py", line 32, in <module>
import platform
File "/Users/jeppe/platform.py", line 1, in <module>
import non_existing_module
ModuleNotFoundError: No module named 'non_existing_module'
Traceback (most recent call last):
File "/Users/jeppe/.bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 23, in run_pylint
PylintRun(sys.argv[1:])
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1731, in __init__
linter.check(args)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1006, in check
self._parallel_check(files_or_modules)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1080, in _parallel_check
for result in self._parallel_task(files_or_modules):
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1028, in _parallel_task
manager = multiprocessing.Manager()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/context.py", line 57, in Manager
m.start()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/managers.py", line 583, in start
self._address = reader.recv()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 383, in _recv
raise EOFError
EOFError
|
ModuleNotFoundError
|
def run_symilar():
"""run symilar"""
from pylint.checkers.similar import Run as SimilarRun
SimilarRun(sys.argv[1:])
|
def run_symilar():
"""run symilar"""
SimilarRun(sys.argv[1:])
|
https://github.com/PyCQA/pylint/issues/3386
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 125, in _main
prepare(preparation_data)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 263, in run_path
return _run_module_code(code, init_globals, run_name,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/Users/jeppe/.bin/pylint", line 5, in <module>
from pylint import run_pylint
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 13, in <module>
from pylint.checkers.similar import Run as SimilarRun
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/__init__.py", line 42, in <module>
from pylint.checkers.base_checker import BaseChecker, BaseTokenChecker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/checkers/base_checker.py", line 17, in <module>
from pylint.config import OptionsProviderMixIn
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/config.py", line 49, in <module>
from pylint import utils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/__init__.py", line 44, in <module>
from pylint.utils.ast_walker import ASTWalker
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 8, in <module>
from astroid import nodes
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/__init__.py", line 63, in <module>
from astroid.nodes import *
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/nodes.py", line 23, in <module>
from astroid.node_classes import (
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/node_classes.py", line 38, in <module>
from astroid import bases
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/bases.py", line 32, in <module>
MANAGER = manager.AstroidManager()
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/util.py", line 26, in <lambda>
lambda: importlib.import_module("." + module_name, "astroid")
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/manager.py", line 25, in <module>
from astroid import modutils
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/astroid/modutils.py", line 32, in <module>
import platform
File "/Users/jeppe/platform.py", line 1, in <module>
import non_existing_module
ModuleNotFoundError: No module named 'non_existing_module'
Traceback (most recent call last):
File "/Users/jeppe/.bin/pylint", line 8, in <module>
sys.exit(run_pylint())
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/__init__.py", line 23, in run_pylint
PylintRun(sys.argv[1:])
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1731, in __init__
linter.check(args)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1006, in check
self._parallel_check(files_or_modules)
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1080, in _parallel_check
for result in self._parallel_task(files_or_modules):
File "/Users/jeppe/.virtualenvs/sublime-tools/lib/python3.8/site-packages/pylint/lint.py", line 1028, in _parallel_task
manager = multiprocessing.Manager()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/context.py", line 57, in Manager
m.start()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/managers.py", line 583, in start
self._address = reader.recv()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/Users/jeppe/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 383, in _recv
raise EOFError
EOFError
|
ModuleNotFoundError
|
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
label = obj.title
if obj.shape == "interface":
label = "«interface»\\n%s" % label
if not self.config.only_classnames:
label = r"%s|%s\l|" % (label, r"\l".join(obj.attrs))
for func in obj.methods:
if func.args.args:
args = [arg.name for arg in func.args.args if arg.name != "self"]
else:
args = []
label = r"%s%s(%s)\l" % (label, func.name, ", ".join(args))
label = "{%s}" % label
if is_exception(obj.node):
return dict(fontcolor="red", label=label, shape="record")
return dict(label=label, shape="record")
|
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
label = obj.title
if obj.shape == "interface":
label = "«interface»\\n%s" % label
if not self.config.only_classnames:
label = r"%s|%s\l|" % (label, r"\l".join(obj.attrs))
for func in obj.methods:
args = [arg.name for arg in func.args.args if arg.name != "self"]
label = r"%s%s(%s)\l" % (label, func.name, ", ".join(args))
label = "{%s}" % label
if is_exception(obj.node):
return dict(fontcolor="red", label=label, shape="record")
return dict(label=label, shape="record")
|
https://github.com/PyCQA/pylint/issues/3351
|
pyreverse -o png -my -f OTHER -S -A -p testuml ~/pyreverse_error.py
...
Traceback (most recent call last):
File "/home/ervthon/venv357/bin/pyreverse", line 11, in <module>
sys.exit(run_pyreverse())
File "/home/ervthon/venv357/lib/python3.5/site-packages/pylint/__init__.py", line 37, in run_pyreverse
PyreverseRun(sys.argv[1:])
File "/home/ervthon/venv357/lib/python3.5/site-packages/pylint/pyreverse/main.py", line 184, in __init__
sys.exit(self.run(args))
File "/home/ervthon/venv357/lib/python3.5/site-packages/pylint/pyreverse/main.py", line 209, in run
writer.DotWriter(self.config).write(diadefs)
File "/home/ervthon/venv357/lib/python3.5/site-packages/pylint/pyreverse/writer.py", line 38, in write
self.write_classes(diagram)
File "/home/ervthon/venv357/lib/python3.5/site-packages/pylint/pyreverse/writer.py", line 59, in write_classes
self.printer.emit_node(i, **self.get_values(obj))
File "/home/ervthon/venv357/lib/python3.5/site-packages/pylint/pyreverse/writer.py", line 134, in get_values
args = [arg.name for arg in func.args.args if arg.name != "self"]
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def visit_classdef(self, node):
"""visit an astroid.Class node
* set the locals_type and instance_attrs_type mappings
* set the implements list and build it
* optionally tag the node with a unique id
"""
if hasattr(node, "locals_type"):
return
node.locals_type = collections.defaultdict(list)
if self.tag:
node.uid = self.generate_id()
# resolve ancestors
for baseobj in node.ancestors(recurs=False):
specializations = getattr(baseobj, "specializations", [])
specializations.append(node)
baseobj.specializations = specializations
# resolve instance attributes
node.instance_attrs_type = collections.defaultdict(list)
for assignattrs in node.instance_attrs.values():
for assignattr in assignattrs:
if not isinstance(assignattr, astroid.Unknown):
self.handle_assignattr_type(assignattr, node)
# resolve implemented interface
try:
node.implements = list(interfaces(node, self.inherited_interfaces))
except astroid.InferenceError:
node.implements = ()
|
def visit_classdef(self, node):
"""visit an astroid.Class node
* set the locals_type and instance_attrs_type mappings
* set the implements list and build it
* optionally tag the node with a unique id
"""
if hasattr(node, "locals_type"):
return
node.locals_type = collections.defaultdict(list)
if self.tag:
node.uid = self.generate_id()
# resolve ancestors
for baseobj in node.ancestors(recurs=False):
specializations = getattr(baseobj, "specializations", [])
specializations.append(node)
baseobj.specializations = specializations
# resolve instance attributes
node.instance_attrs_type = collections.defaultdict(list)
for assignattrs in node.instance_attrs.values():
for assignattr in assignattrs:
self.handle_assignattr_type(assignattr, node)
# resolve implemented interface
try:
node.implements = list(interfaces(node, self.inherited_interfaces))
except astroid.InferenceError:
node.implements = ()
|
https://github.com/PyCQA/pylint/issues/3256
|
parsing tests/test_simple.py...
Traceback (most recent call last):
File "/home/enkidulan/.cache/pypoetry/virtualenvs/pylintdev-py3.7/bin/pyreverse", line 7, in <module>
exec(compile(f.read(), __file__, 'exec'))
File "/home/enkidulan/projects/pylint/pylint/bin/pyreverse", line 4, in <module>
run_pyreverse()
File "/home/enkidulan/projects/pylint/pylint/pylint/__init__.py", line 37, in run_pyreverse
PyreverseRun(sys.argv[1:])
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/main.py", line 184, in __init__
sys.exit(self.run(args))
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/main.py", line 202, in run
diadefs = handler.get_diadefs(project, linker)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/diadefslib.py", line 235, in get_diadefs
diagrams = DefaultDiadefGenerator(linker, self).visit(project)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/utils.py", line 217, in visit
self.visit(local_node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/utils.py", line 217, in visit
self.visit(local_node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/utils.py", line 214, in visit
methods[0](node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/diadefslib.py", line 169, in visit_classdef
self.extract_classes(node, anc_level, association_level)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/diadefslib.py", line 113, in extract_classes
self.add_class(klass_node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/diadefslib.py", line 83, in add_class
self.linker.visit(node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/utils.py", line 214, in visit
methods[0](node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/inspector.py", line 166, in visit_classdef
self.handle_assignattr_type(assignattr, node)
File "/home/enkidulan/projects/pylint/pylint/pylint/pyreverse/inspector.py", line 232, in handle_assignattr_type
current = set(parent.instance_attrs_type[node.attrname])
AttributeError: 'Unknown' object has no attribute 'attrname'
|
AttributeError
|
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
try:
reporter_class = self._load_reporter_class()
except (ImportError, AttributeError):
raise exceptions.InvalidReporterError(name)
else:
self.set_reporter(reporter_class())
|
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
qname = self._reporter_name
module = modutils.load_module_from_name(modutils.get_module_part(qname))
class_name = qname.split(".")[-1]
reporter_class = getattr(module, class_name)
self.set_reporter(reporter_class())
|
https://github.com/PyCQA/pylint/issues/1388
|
ubuntu@ip-0000:~/scripts/pooja/ext-code/pylint$ pip list | grep astroid
DEPRECATION: The default format will switch to columns in the future. You can use --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.conf under the [list] section) to disable this warning.
astroid (1.5.0)
ubuntu@ip-0000:~/scripts/pooja/ext-code/pylint$ pip list | grep pylint
DEPRECATION: The default format will switch to columns in the future. You can use --format=(legacy|columns) (or define a format=(legacy|columns) in your pip.conf under the [list] section) to disable this warning.
pylint (1.7.0, /usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg)
ubuntu@ip-00000:~/scripts/pooja/ext-code/pylint$ cd /var/lib/jenkins/jobs/CI_SyntaxValidator_commons/workspace
(reverse-i-search)`inst': sudo python setup.py ^Cstall
ubuntu@ip-00000:/var/lib/jenkins/jobs/CI_SyntaxValidator_commons/workspace$ ^C
ubuntu@ip-0000:/var/lib/jenkins/jobs/CI_SyntaxValidator_commons/workspace$ pylint commons/
Traceback (most recent call last):
File "/usr/local/bin/pylint", line 11, in <module>
load_entry_point('pylint==1.7.0', 'console_scripts', 'pylint')()
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/lint.py", line 1220, in __init__
linter.load_default_plugins()
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/lint.py", line 453, in load_default_plugins
checkers.initialize(self)
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/checkers/__init__.py", line 114, in initialize
register_plugins(linter, __path__[0])
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/utils.py", line 992, in register_plugins
module = modutils.load_module_from_file(join(directory, filename))
File "/usr/local/lib/python2.7/dist-packages/astroid-1.5.0-py2.7.egg/astroid/modutils.py", line 272, in load_module_from_file
return load_module_from_modpath(modpath, path, use_sys)
File "/usr/local/lib/python2.7/dist-packages/astroid-1.5.0-py2.7.egg/astroid/modutils.py", line 233, in load_module_from_modpath
module = imp.load_module(curname, mp_file, mp_filename, mp_desc)
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/checkers/python3.py", line 100, in <module>
class Python3Checker(checkers.BaseChecker):
File "/usr/local/lib/python2.7/dist-packages/pylint-1.7.0-py2.7.egg/pylint/checkers/python3.py", line 501, in Python3Checker
'sys.version_info < (3, 0)',
File "/usr/local/lib/python2.7/dist-packages/astroid-1.5.0-py2.7.egg/astroid/node_classes.py", line 624, in repr_tree
_repr_tree(self, result, set())
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 210, in wrapper
File "/usr/local/lib/python2.7/dist-packages/astroid-1.5.0-py2.7.egg/astroid/node_classes.py", line 613, in _repr_node
depth)
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 210, in wrapper
File "/usr/local/lib/python2.7/dist-packages/astroid-1.5.0-py2.7.egg/astroid/node_classes.py", line 613, in _repr_node
depth)
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 210, in wrapper
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 191, in dispatch
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 142, in _find_impl
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 130, in _compose_mro
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 84, in _c3_mro
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 88, in _c3_mro
File "build/bdist.linux-x86_64/egg/singledispatch.py", line 39, in _c3_merge
RuntimeError: Inconsistent hierarchy
|
RuntimeError
|
def _parallel_task(self, files_or_modules):
# Prepare configuration for child linters.
child_config = self._get_jobs_config()
children = []
manager = multiprocessing.Manager()
tasks_queue = manager.Queue()
results_queue = manager.Queue()
# Send files to child linters.
expanded_files = []
for descr in self.expand_files(files_or_modules):
modname, filepath, is_arg = descr["name"], descr["path"], descr["isarg"]
if self.should_analyze_file(modname, filepath, is_argument=is_arg):
expanded_files.append(descr)
# do not start more jobs than needed
for _ in range(min(self.config.jobs, len(expanded_files))):
child_linter = ChildLinter(args=(tasks_queue, results_queue, child_config))
child_linter.start()
children.append(child_linter)
for files_or_module in expanded_files:
path = files_or_module["path"]
tasks_queue.put([path])
# collect results from child linters
failed = False
for _ in expanded_files:
try:
result = results_queue.get()
except Exception as ex:
print(
"internal error while receiving results from child linter",
file=sys.stderr,
)
print(ex, file=sys.stderr)
failed = True
break
yield result
# Stop child linters and wait for their completion.
for _ in range(self.config.jobs):
tasks_queue.put("STOP")
for child in children:
child.join()
if failed:
print("Error occurred, stopping the linter.", file=sys.stderr)
sys.exit(32)
|
def _parallel_task(self, files_or_modules):
# Prepare configuration for child linters.
child_config = self._get_jobs_config()
children = []
manager = multiprocessing.Manager()
tasks_queue = manager.Queue()
results_queue = manager.Queue()
# Send files to child linters.
expanded_files = self.expand_files(files_or_modules)
# do not start more jobs than needed
for _ in range(min(self.config.jobs, len(expanded_files))):
child_linter = ChildLinter(args=(tasks_queue, results_queue, child_config))
child_linter.start()
children.append(child_linter)
for files_or_module in expanded_files:
path = files_or_module["path"]
tasks_queue.put([path])
# collect results from child linters
failed = False
for _ in expanded_files:
try:
result = results_queue.get()
except Exception as ex:
print(
"internal error while receiving results from child linter",
file=sys.stderr,
)
print(ex, file=sys.stderr)
failed = True
break
yield result
# Stop child linters and wait for their completion.
for _ in range(self.config.jobs):
tasks_queue.put("STOP")
for child in children:
child.join()
if failed:
print("Error occurred, stopping the linter.", file=sys.stderr)
sys.exit(32)
|
https://github.com/PyCQA/pylint/issues/1885
|
~/devel/openage % pylint --jobs=2 openage
No config file found, using default configuration
************* Module openage.__init__
W: 20, 0: TODO pylint: disable=wrong-import-position (fixme)
************* Module openage.__main__
W: 11, 0: TODO remove this once all multiprocessing has been eliminated: (fixme)
************* Module openage.default_dirs
W: 14, 0: TODO: use os.pathsep for multipath variables (fixme)
W: 42, 0: TODO: other windows paths (fixme)
Process ChildLinter-3:
Traceback (most recent call last):
File "/usr/lib64/python3.6/tokenize.py", line 390, in find_cookie
line_string = line.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xf8 in position 40: invalid start byte
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 129, in file_build
stream, encoding, data = open_source_file(path)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 49, in open_source_file
encoding = detect_encoding(byte_stream.readline)[0]
File "/usr/lib64/python3.6/tokenize.py", line 431, in detect_encoding
encoding = find_cookie(first)
File "/usr/lib64/python3.6/tokenize.py", line 395, in find_cookie
raise SyntaxError(msg)
File "<string>", line None
SyntaxError: invalid or missing encoding declaration for 'openage/cabextract/lzxd.cpython-35m-x86_64-linux-gnu.so'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 896, in get_ast
return MANAGER.ast_from_file(filepath, modname, source=True)
File "/usr/lib64/python3.6/site-packages/astroid/manager.py", line 80, in ast_from_file
return AstroidBuilder(self).file_build(filepath, modname)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 137, in file_build
'{error}', modname=modname, path=path, error=exc))
File "/usr/lib64/python3.6/site-packages/astroid/util.py", line 30, in reraise
six.reraise(type(exception), exception, sys.exc_info()[2])
File "/usr/lib64/python3.6/site-packages/six.py", line 692, in reraise
raise value.with_traceback(tb)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 129, in file_build
stream, encoding, data = open_source_file(path)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 49, in open_source_file
encoding = detect_encoding(byte_stream.readline)[0]
File "/usr/lib64/python3.6/tokenize.py", line 431, in detect_encoding
encoding = find_cookie(first)
File "/usr/lib64/python3.6/tokenize.py", line 395, in find_cookie
raise SyntaxError(msg)
astroid.exceptions.AstroidSyntaxError: Python 3 encoding specification error or unknown encoding:
invalid or missing encoding declaration for 'openage/cabextract/lzxd.cpython-35m-x86_64-linux-gnu.so'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 208, in run
result = self._run_linter(file_or_module[0])
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 237, in _run_linter
linter.check(file_or_module)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 726, in check
self._do_check(files_or_modules)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 844, in _do_check
ast_node = self.get_ast(filepath, modname)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 900, in get_ast
args=str(ex.error))
File "/usr/lib64/python3.6/site-packages/pylint/utils.py", line 359, in add_message
'Message %s must provide line, got None' % msgid)
pylint.exceptions.InvalidMessageError: Message E0001 must provide line, got None
Process ChildLinter-2:
Traceback (most recent call last):
File "/usr/lib64/python3.6/tokenize.py", line 390, in find_cookie
line_string = line.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe0 in position 24: invalid continuation byte
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 129, in file_build
stream, encoding, data = open_source_file(path)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 49, in open_source_file
encoding = detect_encoding(byte_stream.readline)[0]
File "/usr/lib64/python3.6/tokenize.py", line 431, in detect_encoding
encoding = find_cookie(first)
File "/usr/lib64/python3.6/tokenize.py", line 395, in find_cookie
raise SyntaxError(msg)
File "<string>", line None
SyntaxError: invalid or missing encoding declaration for 'openage/cabextract/cabchecksum.cpython-35m-x86_64-linux-gnu.so'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 896, in get_ast
return MANAGER.ast_from_file(filepath, modname, source=True)
File "/usr/lib64/python3.6/site-packages/astroid/manager.py", line 80, in ast_from_file
return AstroidBuilder(self).file_build(filepath, modname)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 137, in file_build
'{error}', modname=modname, path=path, error=exc))
File "/usr/lib64/python3.6/site-packages/astroid/util.py", line 30, in reraise
six.reraise(type(exception), exception, sys.exc_info()[2])
File "/usr/lib64/python3.6/site-packages/six.py", line 692, in reraise
raise value.with_traceback(tb)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 129, in file_build
stream, encoding, data = open_source_file(path)
File "/usr/lib64/python3.6/site-packages/astroid/builder.py", line 49, in open_source_file
encoding = detect_encoding(byte_stream.readline)[0]
File "/usr/lib64/python3.6/tokenize.py", line 431, in detect_encoding
encoding = find_cookie(first)
File "/usr/lib64/python3.6/tokenize.py", line 395, in find_cookie
raise SyntaxError(msg)
astroid.exceptions.AstroidSyntaxError: Python 3 encoding specification error or unknown encoding:
invalid or missing encoding declaration for 'openage/cabextract/cabchecksum.cpython-35m-x86_64-linux-gnu.so'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 208, in run
result = self._run_linter(file_or_module[0])
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 237, in _run_linter
linter.check(file_or_module)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 726, in check
self._do_check(files_or_modules)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 844, in _do_check
ast_node = self.get_ast(filepath, modname)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 900, in get_ast
args=str(ex.error))
File "/usr/lib64/python3.6/site-packages/pylint/utils.py", line 359, in add_message
'Message %s must provide line, got None' % msgid)
pylint.exceptions.InvalidMessageError: Message E0001 must provide line, got None
^CTraceback (most recent call last):
File "/usr/lib/python-exec/python3.6/pylint", line 11, in <module>
load_entry_point('pylint==1.7.2', 'console_scripts', 'pylint')()
File "/usr/lib64/python3.6/site-packages/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 1300, in __init__
linter.check(args)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 729, in check
self._parallel_check(files_or_modules)
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 797, in _parallel_check
for result in self._parallel_task(files_or_modules):
File "/usr/lib64/python3.6/site-packages/pylint/lint.py", line 772, in _parallel_task
result = results_queue.get()
File "<string>", line 2, in get
File "/usr/lib64/python3.6/multiprocessing/managers.py", line 757, in _callmethod
kind, result = conn.recv()
File "/usr/lib64/python3.6/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/usr/lib64/python3.6/multiprocessing/connection.py", line 407, in _recv_bytes
buf = self._recv(4)
File "/usr/lib64/python3.6/multiprocessing/connection.py", line 379, in _recv
chunk = read(handle, remaining)
KeyboardInterrupt
|
UnicodeDecodeError
|
def _check_consider_get(self, node):
def type_and_name_are_equal(node_a, node_b):
for _type in [astroid.Name, astroid.AssignName]:
if all(isinstance(_node, _type) for _node in [node_a, node_b]):
return node_a.name == node_b.name
if all(isinstance(_node, astroid.Const) for _node in [node_a, node_b]):
return node_a.value == node_b.value
return False
if_block_ok = (
isinstance(node.test, astroid.Compare)
and len(node.body) == 1
and isinstance(node.body[0], astroid.Assign)
and isinstance(node.body[0].value, astroid.Subscript)
and type_and_name_are_equal(node.body[0].value.value, node.test.ops[0][1])
and isinstance(node.body[0].value.slice, astroid.Index)
and type_and_name_are_equal(node.body[0].value.slice.value, node.test.left)
and len(node.body[0].targets) == 1
and isinstance(node.body[0].targets[0], astroid.AssignName)
and isinstance(utils.safe_infer(node.test.ops[0][1]), astroid.Dict)
)
if if_block_ok and not node.orelse:
self.add_message("consider-using-get", node=node)
elif (
if_block_ok
and len(node.orelse) == 1
and isinstance(node.orelse[0], astroid.Assign)
and type_and_name_are_equal(node.orelse[0].targets[0], node.body[0].targets[0])
and len(node.orelse[0].targets) == 1
):
self.add_message("consider-using-get", node=node)
|
def _check_consider_get(self, node):
def type_and_name_are_equal(node_a, node_b):
for _type in [astroid.Name, astroid.AssignName]:
if all(isinstance(_node, _type) for _node in [node_a, node_b]):
return node_a.name == node_b.name
if all(isinstance(_node, astroid.Const) for _node in [node_a, node_b]):
return node_a.value == node_b.value
return False
if_block_ok = (
isinstance(node.test, astroid.Compare)
and len(node.body) == 1
and isinstance(node.body[0], astroid.Assign)
and isinstance(node.body[0].value, astroid.Subscript)
and type_and_name_are_equal(node.body[0].value.value, node.test.ops[0][1])
and type_and_name_are_equal(node.body[0].value.slice.value, node.test.left)
and len(node.body[0].targets) == 1
and isinstance(node.body[0].targets[0], astroid.AssignName)
and isinstance(utils.safe_infer(node.test.ops[0][1]), astroid.Dict)
)
if if_block_ok and not node.orelse:
self.add_message("consider-using-get", node=node)
elif (
if_block_ok
and len(node.orelse) == 1
and isinstance(node.orelse[0], astroid.Assign)
and type_and_name_are_equal(node.orelse[0].targets[0], node.body[0].targets[0])
and len(node.orelse[0].targets) == 1
):
self.add_message("consider-using-get", node=node)
|
https://github.com/PyCQA/pylint/issues/2252
|
Traceback (most recent call last):
File "/usr/lib64/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib64/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/user/pylint/pylint/__main__.py", line 7, in <module>
pylint.run_pylint()
File "/home/user/pylint/pylint/__init__.py", line 18, in run_pylint
Run(sys.argv[1:])
File "/home/user/pylint/pylint/lint.py", line 1372, in __init__
linter.check(args)
File "/home/user/pylint/pylint/lint.py", line 784, in check
self._do_check(files_or_modules)
File "/home/user/pylint/pylint/lint.py", line 917, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/home/user/pylint/pylint/lint.py", line 997, in check_astroid_module
walker.walk(ast_node)
File "/home/user/pylint/pylint/utils.py", line 1152, in walk
self.walk(child)
File "/home/user/pylint/pylint/utils.py", line 1149, in walk
cb(astroid)
File "/home/user/pylint/pylint/checkers/refactoring.py", line 410, in visit_if
self._check_consider_get(node)
File "/home/user/pylint/pylint/checkers/refactoring.py", line 392, in _check_consider_get
and type_and_name_are_equal(node.body[0].value.slice.value, node.test.left)
AttributeError: 'Slice' object has no attribute 'value'
|
AttributeError
|
def _is_node_return_ended(node):
"""Check if the node ends with an explicit return statement.
Args:
node (astroid.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit statement, False otherwise.
"""
# Recursion base case
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Raise):
# a Raise statement doesn't need to end with a return statement
# but if the exception raised is handled, then the handler has to
# ends with a return statement
if not node.exc:
# Ignore bare raises
return True
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
if handlers:
# among all the handlers handling the exception at least one
# must end with a return statement
return any(_is_node_return_ended(_handler) for _handler in handlers)
# if no handlers handle the exception then it's ok
return True
if isinstance(node, astroid.If):
# if statement is returning if there are exactly two return statements in its
# children : one for the body part, the other for the orelse part
return_stmts = [_is_node_return_ended(_child) for _child in node.get_children()]
return sum(return_stmts) == 2
# recurses on the children of the node except for those which are except handler
# because one cannot be sure that the handler will really be used
return any(
_is_node_return_ended(_child)
for _child in node.get_children()
if not isinstance(_child, astroid.ExceptHandler)
)
|
def _is_node_return_ended(node):
"""Check if the node ends with an explicit return statement.
Args:
node (astroid.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit statement, False otherwise.
"""
# Recursion base case
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Raise):
# a Raise statement doesn't need to end with a return statement
# but if the exception raised is handled, then the handler has to
# ends with a return statement
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
if handlers:
# among all the handlers handling the exception at least one
# must end with a return statement
return any(_is_node_return_ended(_handler) for _handler in handlers)
# if no handlers handle the exception then it's ok
return True
if isinstance(node, astroid.If):
# if statement is returning if there are exactly two return statements in its
# children : one for the body part, the other for the orelse part
return_stmts = [_is_node_return_ended(_child) for _child in node.get_children()]
return sum(return_stmts) == 2
# recurses on the children of the node except for those which are except handler
# because one cannot be sure that the handler will really be used
return any(
_is_node_return_ended(_child)
for _child in node.get_children()
if not isinstance(_child, astroid.ExceptHandler)
)
|
https://github.com/PyCQA/pylint/issues/1773
|
$ pylint pylint_test.py
No config file found, using default configuration
************* Module pylint_test
C: 1, 0: Missing module docstring (missing-docstring)
C: 1, 0: Missing class docstring (missing-docstring)
C: 4, 0: Missing function docstring (missing-docstring)
W: 6, 8: Using a conditional statement with a constant value (using-constant-test)
Traceback (most recent call last):
File "/usr/local/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/usr/local/lib/python3.6/site-packages/pylint/__init__.py", line 16, in run_pylint
Run(sys.argv[1:])
File "/usr/local/lib/python3.6/site-packages/pylint/lint.py", line 1347, in __init__
linter.check(args)
File "/usr/local/lib/python3.6/site-packages/pylint/lint.py", line 768, in check
self._do_check(files_or_modules)
File "/usr/local/lib/python3.6/site-packages/pylint/lint.py", line 901, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/usr/local/lib/python3.6/site-packages/pylint/lint.py", line 980, in check_astroid_module
walker.walk(ast_node)
File "/usr/local/lib/python3.6/site-packages/pylint/utils.py", line 1014, in walk
self.walk(child)
File "/usr/local/lib/python3.6/site-packages/pylint/utils.py", line 1016, in walk
cb(astroid)
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 357, in leave_functiondef
self._check_consistent_returns(node)
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 560, in _check_consistent_returns
and _is_node_return_ended(node)):
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 73, in _is_node_return_ended
return any(_is_node_return_ended(_child) for _child in node.get_children()
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 74, in <genexpr>
if not isinstance(_child, astroid.ExceptHandler))
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 73, in _is_node_return_ended
return any(_is_node_return_ended(_child) for _child in node.get_children()
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 74, in <genexpr>
if not isinstance(_child, astroid.ExceptHandler))
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 69, in _is_node_return_ended
return_stmts = [_is_node_return_ended(_child) for _child in node.get_children()]
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 69, in <listcomp>
return_stmts = [_is_node_return_ended(_child) for _child in node.get_children()]
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 63, in _is_node_return_ended
return any(_is_node_return_ended(_handler) for _handler in handlers)
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 63, in <genexpr>
return any(_is_node_return_ended(_handler) for _handler in handlers)
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 73, in _is_node_return_ended
return any(_is_node_return_ended(_child) for _child in node.get_children()
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 74, in <genexpr>
if not isinstance(_child, astroid.ExceptHandler))
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/refactoring.py", line 55, in _is_node_return_ended
exc = utils.safe_infer(node.exc)
File "/usr/local/lib/python3.6/site-packages/pylint/checkers/utils.py", line 806, in safe_infer
inferit = node.infer(context=context)
AttributeError: 'NoneType' object has no attribute 'infer'
|
AttributeError
|
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined", node=node, args=(redeftype, defined_self.fromlineno)
)
|
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(defined_self.name):
return
self.add_message(
"function-redefined", node=node, args=(redeftype, defined_self.fromlineno)
)
|
https://github.com/PyCQA/pylint/issues/1774
|
Traceback (most recent call last):
File "/Users/jacques/miniconda/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/__init__.py", line 16, in run_pylint
Run(sys.argv[1:])
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/lint.py", line 1347, in __init__
linter.check(args)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/lint.py", line 768, in check
self._do_check(files_or_modules)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/lint.py", line 901, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/lint.py", line 980, in check_astroid_module
walker.walk(ast_node)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/utils.py", line 1014, in walk
self.walk(child)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/utils.py", line 1011, in walk
cb(astroid)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/checkers/base.py", line 505, in visit_functiondef
self._check_redefinition(node.is_method() and 'method' or 'function', node)
File "/Users/jacques/miniconda/lib/python3.6/site-packages/pylint/checkers/base.py", line 717, in _check_redefinition
if dummy_variables_rgx and dummy_variables_rgx.match(defined_self.name):
AttributeError: 'ImportFrom' object has no attribute 'name'
|
AttributeError
|
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame()
if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return
if self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node)
|
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame()
if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if exc is not None and self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node)
|
https://github.com/PyCQA/pylint/issues/1779
|
# pylint test.py
No config file found, using default configuration
Traceback (most recent call last):
File "/usr/local/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/usr/local/lib/python3.6/dist-packages/pylint/__init__.py", line 16, in run_pylint
Run(sys.argv[1:])
File "/usr/local/lib/python3.6/dist-packages/pylint/lint.py", line 1347, in __init__
linter.check(args)
File "/usr/local/lib/python3.6/dist-packages/pylint/lint.py", line 768, in check
self._do_check(files_or_modules)
File "/usr/local/lib/python3.6/dist-packages/pylint/lint.py", line 901, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/usr/local/lib/python3.6/dist-packages/pylint/lint.py", line 980, in check_astroid_module
walker.walk(ast_node)
File "/usr/local/lib/python3.6/dist-packages/pylint/utils.py", line 1014, in walk
self.walk(child)
File "/usr/local/lib/python3.6/dist-packages/pylint/utils.py", line 1014, in walk
self.walk(child)
File "/usr/local/lib/python3.6/dist-packages/pylint/utils.py", line 1011, in walk
cb(astroid)
File "/usr/local/lib/python3.6/dist-packages/pylint/checkers/refactoring.py", line 362, in visit_raise
self._check_stop_iteration_inside_generator(node)
File "/usr/local/lib/python3.6/dist-packages/pylint/checkers/refactoring.py", line 374, in _check_stop_iteration_inside_generator
if exc is not None and self._check_exception_inherit_from_stopiteration(exc):
File "/usr/local/lib/python3.6/dist-packages/pylint/checkers/refactoring.py", line 381, in _check_exception_inherit_from_stopiteration
return any(_class.qname() == stopiteration_qname for _class in exc.mro())
TypeError: 'Uninferable' object is not iterable
|
TypeError
|
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = ScopeAccessMap()
self._first_attrs = []
self._meth_could_be_func = None
|
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = []
self._first_attrs = []
self._meth_could_be_func = None
|
https://github.com/PyCQA/pylint/issues/1126
|
No config file found, using default configuration
************* Module pylint_bug
C: 1, 0: Missing module docstring (missing-docstring)
C: 1, 0: Missing class docstring (missing-docstring)
C: 2,10: Missing method docstring (missing-docstring)
R: 1, 0: Too few public methods (1/2) (too-few-public-methods)
C: 6, 0: Missing function docstring (missing-docstring)
Traceback (most recent call last):
File "/tmp/test_pylint/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 1310, in __init__
linter.check(args)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 732, in check
self._do_check(files_or_modules)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 863, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 944, in check_astroid_module
walker.walk(ast_node)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 941, in walk
cb(astroid)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/checkers/classes.py", line 578, in visit_attribute
self._accessed[-1][attrname].append(node)
IndexError: list index out of range
|
IndexError
|
def visit_classdef(self, node):
"""init visit variable _accessed"""
self._check_bases_classes(node)
# if not an exception or a metaclass
if node.type == "class" and has_known_bases(node):
try:
node.local_attr("__init__")
except astroid.NotFoundError:
self.add_message("no-init", args=node, node=node)
self._check_slots(node)
self._check_proper_bases(node)
self._check_consistent_mro(node)
|
def visit_classdef(self, node):
"""init visit variable _accessed"""
self._accessed.append(defaultdict(list))
self._check_bases_classes(node)
# if not an exception or a metaclass
if node.type == "class" and has_known_bases(node):
try:
node.local_attr("__init__")
except astroid.NotFoundError:
self.add_message("no-init", args=node, node=node)
self._check_slots(node)
self._check_proper_bases(node)
self._check_consistent_mro(node)
|
https://github.com/PyCQA/pylint/issues/1126
|
No config file found, using default configuration
************* Module pylint_bug
C: 1, 0: Missing module docstring (missing-docstring)
C: 1, 0: Missing class docstring (missing-docstring)
C: 2,10: Missing method docstring (missing-docstring)
R: 1, 0: Too few public methods (1/2) (too-few-public-methods)
C: 6, 0: Missing function docstring (missing-docstring)
Traceback (most recent call last):
File "/tmp/test_pylint/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 1310, in __init__
linter.check(args)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 732, in check
self._do_check(files_or_modules)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 863, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 944, in check_astroid_module
walker.walk(ast_node)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 941, in walk
cb(astroid)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/checkers/classes.py", line 578, in visit_attribute
self._accessed[-1][attrname].append(node)
IndexError: list index out of range
|
IndexError
|
def leave_classdef(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
ignore_mixins = get_global_option(self, "ignore-mixin-members", default=True)
if ignore_mixins and cnode.name[-5:].lower() == "mixin":
# We are in a mixin class. No need to try to figure out if
# something is missing, since it is most likely that it will
# miss.
return
accessed = self._accessed.accessed(cnode)
if cnode.type != "metaclass":
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if not self.linter.is_message_enabled("attribute-defined-outside-init"):
return
defining_methods = self.config.defining_attr_methods
current_module = cnode.root()
for attr, nodes in six.iteritems(cnode.instance_attrs):
# skip nodes which are not in the current module and it may screw up
# the output, while it's not worth it
nodes = [
n
for n in nodes
if not isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
and n.root() is current_module
]
if not nodes:
continue # error detected by typechecking
# check if any method attr is defined in is a defining method
if any(node.frame().name in defining_methods for node in nodes):
continue
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
# check if any parent method attr is defined in is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astroid.NotFoundError:
for node in nodes:
if node.frame().name not in defining_methods:
# If the attribute was set by a callfunc in any
# of the defining methods, then don't emit
# the warning.
if _called_in_methods(node.frame(), cnode, defining_methods):
continue
self.add_message(
"attribute-defined-outside-init", args=attr, node=node
)
|
def leave_classdef(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
ignore_mixins = get_global_option(self, "ignore-mixin-members", default=True)
if ignore_mixins and cnode.name[-5:].lower() == "mixin":
# We are in a mixin class. No need to try to figure out if
# something is missing, since it is most likely that it will
# miss.
return
accessed = self._accessed.pop()
if cnode.type != "metaclass":
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if not self.linter.is_message_enabled("attribute-defined-outside-init"):
return
defining_methods = self.config.defining_attr_methods
current_module = cnode.root()
for attr, nodes in six.iteritems(cnode.instance_attrs):
# skip nodes which are not in the current module and it may screw up
# the output, while it's not worth it
nodes = [
n
for n in nodes
if not isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
and n.root() is current_module
]
if not nodes:
continue # error detected by typechecking
# check if any method attr is defined in is a defining method
if any(node.frame().name in defining_methods for node in nodes):
continue
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
# check if any parent method attr is defined in is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astroid.NotFoundError:
for node in nodes:
if node.frame().name not in defining_methods:
# If the attribute was set by a callfunc in any
# of the defining methods, then don't emit
# the warning.
if _called_in_methods(node.frame(), cnode, defining_methods):
continue
self.add_message(
"attribute-defined-outside-init", args=attr, node=node
)
|
https://github.com/PyCQA/pylint/issues/1126
|
No config file found, using default configuration
************* Module pylint_bug
C: 1, 0: Missing module docstring (missing-docstring)
C: 1, 0: Missing class docstring (missing-docstring)
C: 2,10: Missing method docstring (missing-docstring)
R: 1, 0: Too few public methods (1/2) (too-few-public-methods)
C: 6, 0: Missing function docstring (missing-docstring)
Traceback (most recent call last):
File "/tmp/test_pylint/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 1310, in __init__
linter.check(args)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 732, in check
self._do_check(files_or_modules)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 863, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 944, in check_astroid_module
walker.walk(ast_node)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 941, in walk
cb(astroid)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/checkers/classes.py", line 578, in visit_attribute
self._accessed[-1][attrname].append(node)
IndexError: list index out of range
|
IndexError
|
def visit_attribute(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
# Check self
if self.is_first_attr(node):
self._accessed.set_accessed(node)
return
if not self.linter.is_message_enabled("protected-access"):
return
self._check_protected_attribute_access(node)
|
def visit_attribute(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
attrname = node.attrname
# Check self
if self.is_first_attr(node):
self._accessed[-1][attrname].append(node)
return
if not self.linter.is_message_enabled("protected-access"):
return
self._check_protected_attribute_access(node)
|
https://github.com/PyCQA/pylint/issues/1126
|
No config file found, using default configuration
************* Module pylint_bug
C: 1, 0: Missing module docstring (missing-docstring)
C: 1, 0: Missing class docstring (missing-docstring)
C: 2,10: Missing method docstring (missing-docstring)
R: 1, 0: Too few public methods (1/2) (too-few-public-methods)
C: 6, 0: Missing function docstring (missing-docstring)
Traceback (most recent call last):
File "/tmp/test_pylint/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 1310, in __init__
linter.check(args)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 732, in check
self._do_check(files_or_modules)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 863, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 944, in check_astroid_module
walker.walk(ast_node)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 941, in walk
cb(astroid)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/checkers/classes.py", line 578, in visit_attribute
self._accessed[-1][attrname].append(node)
IndexError: list index out of range
|
IndexError
|
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign) and self.is_first_attr(node):
self._accessed.set_accessed(node)
self._check_in_slots(node)
|
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign) and self.is_first_attr(node):
self._accessed[-1][node.attrname].append(node)
self._check_in_slots(node)
|
https://github.com/PyCQA/pylint/issues/1126
|
No config file found, using default configuration
************* Module pylint_bug
C: 1, 0: Missing module docstring (missing-docstring)
C: 1, 0: Missing class docstring (missing-docstring)
C: 2,10: Missing method docstring (missing-docstring)
R: 1, 0: Too few public methods (1/2) (too-few-public-methods)
C: 6, 0: Missing function docstring (missing-docstring)
Traceback (most recent call last):
File "/tmp/test_pylint/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/__init__.py", line 13, in run_pylint
Run(sys.argv[1:])
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 1310, in __init__
linter.check(args)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 732, in check
self._do_check(files_or_modules)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 863, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/lint.py", line 944, in check_astroid_module
walker.walk(ast_node)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 944, in walk
self.walk(child)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/utils.py", line 941, in walk
cb(astroid)
File "/private/tmp/test_pylint/lib/python3.5/site-packages/pylint/checkers/classes.py", line 578, in visit_attribute
self._accessed[-1][attrname].append(node)
IndexError: list index out of range
|
IndexError
|
def _format_option_value(optdict, value):
"""return the user input's value from a 'compiled' value"""
if isinstance(value, (list, tuple)):
value = ",".join(_format_option_value(optdict, item) for item in value)
elif isinstance(value, dict):
value = ",".join("%s:%s" % (k, v) for k, v in value.items())
elif hasattr(value, "match"): # optdict.get('type') == 'regexp'
# compiled regexp
value = value.pattern
elif optdict.get("type") == "yn":
value = value and "yes" or "no"
elif isinstance(value, six.string_types) and value.isspace():
value = "'%s'" % value
return value
|
def _format_option_value(optdict, value):
"""return the user input's value from a 'compiled' value"""
if isinstance(value, (list, tuple)):
value = ",".join(value)
elif isinstance(value, dict):
value = ",".join("%s:%s" % (k, v) for k, v in value.items())
elif hasattr(value, "match"): # optdict.get('type') == 'regexp'
# compiled regexp
value = value.pattern
elif optdict.get("type") == "yn":
value = value and "yes" or "no"
elif isinstance(value, six.string_types) and value.isspace():
value = "'%s'" % value
return value
|
https://github.com/PyCQA/pylint/issues/990
|
$ pylint
[working help output]
$ pylint --ignore-patterns=.*_pb2.py
No config file found, using default configuration
Traceback (most recent call last):
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/bin/pylint", line 11, in <module>
sys.exit(run_pylint())
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/local/lib/python2.7/site-packages/pylint/__init__.py", line 11, in run_pylint
Run(sys.argv[1:])
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/local/lib/python2.7/site-packages/pylint/lint.py", line 1289, in __init__
print(linter.help())
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/local/lib/python2.7/site-packages/pylint/config.py", line 692, in help
return self.cmdline_parser.format_help()
File "/usr/lib/python2.7/optparse.py", line 1650, in format_help
result.append(self.format_option_help(formatter))
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/local/lib/python2.7/site-packages/pylint/config.py", line 321, in format_option_help
result.append(group.format_help(formatter))
File "/usr/lib/python2.7/optparse.py", line 1114, in format_help
result += OptionContainer.format_help(self, formatter)
File "/usr/lib/python2.7/optparse.py", line 1085, in format_help
result.append(self.format_option_help(formatter))
File "/usr/lib/python2.7/optparse.py", line 1074, in format_option_help
result.append(formatter.format_option(option))
File "/usr/lib/python2.7/optparse.py", line 316, in format_option
help_text = self.expand_default(option)
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/local/lib/python2.7/site-packages/pylint/config.py", line 234, in _expand_default
value = utils._format_option_value(optdict, value)
File "/home/rcardona/.virtualenvs/tmp-4658876925714861/local/lib/python2.7/site-packages/pylint/utils.py", line 1092, in _format_option_value
value = ','.join(value)
TypeError: sequence item 0: expected string, _sre.SRE_Pattern found
|
TypeError
|
def _parse_token(self, token, value, parts):
if token == "YYYY":
parts["year"] = int(value)
elif token == "YY":
value = int(value)
parts["year"] = 1900 + value if value > 68 else 2000 + value
elif token in ["MMMM", "MMM"]:
parts["month"] = self.locale.month_number(value.lower())
elif token in ["MM", "M"]:
parts["month"] = int(value)
elif token in ["DDDD", "DDD"]:
parts["day_of_year"] = int(value)
elif token in ["DD", "D"]:
parts["day"] = int(value)
elif token == "Do":
parts["day"] = int(value)
elif token == "dddd":
# locale day names are 1-indexed
day_of_week = [x.lower() for x in self.locale.day_names].index(value.lower())
parts["day_of_week"] = day_of_week - 1
elif token == "ddd":
# locale day abbreviations are 1-indexed
day_of_week = [x.lower() for x in self.locale.day_abbreviations].index(
value.lower()
)
parts["day_of_week"] = day_of_week - 1
elif token.upper() in ["HH", "H"]:
parts["hour"] = int(value)
elif token in ["mm", "m"]:
parts["minute"] = int(value)
elif token in ["ss", "s"]:
parts["second"] = int(value)
elif token == "S":
# We have the *most significant* digits of an arbitrary-precision integer.
# We want the six most significant digits as an integer, rounded.
# IDEA: add nanosecond support somehow? Need datetime support for it first.
value = value.ljust(7, str("0"))
# floating-point (IEEE-754) defaults to half-to-even rounding
seventh_digit = int(value[6])
if seventh_digit == 5:
rounding = int(value[5]) % 2
elif seventh_digit > 5:
rounding = 1
else:
rounding = 0
parts["microsecond"] = int(value[:6]) + rounding
elif token == "X":
parts["timestamp"] = float(value)
elif token == "x":
parts["expanded_timestamp"] = int(value)
elif token in ["ZZZ", "ZZ", "Z"]:
parts["tzinfo"] = TzinfoParser.parse(value)
elif token in ["a", "A"]:
if value in (self.locale.meridians["am"], self.locale.meridians["AM"]):
parts["am_pm"] = "am"
elif value in (self.locale.meridians["pm"], self.locale.meridians["PM"]):
parts["am_pm"] = "pm"
elif token == "W":
parts["weekdate"] = value
|
def _parse_token(self, token, value, parts):
if token == "YYYY":
parts["year"] = int(value)
elif token == "YY":
value = int(value)
parts["year"] = 1900 + value if value > 68 else 2000 + value
elif token in ["MMMM", "MMM"]:
parts["month"] = self.locale.month_number(value.lower())
elif token in ["MM", "M"]:
parts["month"] = int(value)
elif token in ["DDDD", "DDD"]:
parts["day_of_year"] = int(value)
elif token in ["DD", "D"]:
parts["day"] = int(value)
elif token == "Do":
parts["day"] = int(value)
elif token == "dddd":
parts["day_of_week"] = self.locale.day_names.index(value) - 1
elif token == "ddd":
parts["day_of_week"] = self.locale.day_abbreviations.index(value) - 1
elif token.upper() in ["HH", "H"]:
parts["hour"] = int(value)
elif token in ["mm", "m"]:
parts["minute"] = int(value)
elif token in ["ss", "s"]:
parts["second"] = int(value)
elif token == "S":
# We have the *most significant* digits of an arbitrary-precision integer.
# We want the six most significant digits as an integer, rounded.
# IDEA: add nanosecond support somehow? Need datetime support for it first.
value = value.ljust(7, str("0"))
# floating-point (IEEE-754) defaults to half-to-even rounding
seventh_digit = int(value[6])
if seventh_digit == 5:
rounding = int(value[5]) % 2
elif seventh_digit > 5:
rounding = 1
else:
rounding = 0
parts["microsecond"] = int(value[:6]) + rounding
elif token == "X":
parts["timestamp"] = float(value)
elif token == "x":
parts["expanded_timestamp"] = int(value)
elif token in ["ZZZ", "ZZ", "Z"]:
parts["tzinfo"] = TzinfoParser.parse(value)
elif token in ["a", "A"]:
if value in (self.locale.meridians["am"], self.locale.meridians["AM"]):
parts["am_pm"] = "am"
elif value in (self.locale.meridians["pm"], self.locale.meridians["PM"]):
parts["am_pm"] = "pm"
elif token == "W":
parts["weekdate"] = value
|
https://github.com/arrow-py/arrow/issues/851
|
Traceback (most recent call last):
File "mytest.py", line 5, in <module>
print(arrow.get(date_en, f))
File "/bla/env/lib/python3.7/site-packages/arrow/api.py", line 21, in get
return _factory.get(*args, **kwargs)
File "/bla/env/lib/python3.7/site-packages/arrow/factory.py", line 243, in get
dt = parser.DateTimeParser(locale).parse(args[0], args[1])
File "/bla/env/lib/python3.7/site-packages/arrow/parser.py", line 238, in parse
self._parse_token(token, value, parts)
File "/bla/env/lib/python3.7/site-packages/arrow/parser.py", line 346, in _parse_token
parts["day_of_week"] = self.locale.day_names.index(value) - 1
ValueError: 'MONDAY' is not in list
|
ValueError
|
def fromtimestamp(cls, timestamp, tzinfo=None):
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, converted to
the given timezone.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
:param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
"""
if tzinfo is None:
tzinfo = dateutil_tz.tzlocal()
elif util.isstr(tzinfo):
tzinfo = parser.TzinfoParser.parse(tzinfo)
if not util.is_timestamp(timestamp):
raise ValueError("The provided timestamp '{}' is invalid.".format(timestamp))
timestamp = util.normalize_timestamp(float(timestamp))
dt = datetime.fromtimestamp(timestamp, tzinfo)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
|
def fromtimestamp(cls, timestamp, tzinfo=None):
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, converted to
the given timezone.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
:param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time.
"""
if tzinfo is None:
tzinfo = dateutil_tz.tzlocal()
elif util.isstr(tzinfo):
tzinfo = parser.TzinfoParser.parse(tzinfo)
if not util.is_timestamp(timestamp):
raise ValueError("The provided timestamp '{}' is invalid.".format(timestamp))
dt = datetime.fromtimestamp(float(timestamp), tzinfo)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
|
https://github.com/arrow-py/arrow/issues/795
|
arrow.get(1590889920000.0).timestamp
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/api.py", line 21, in get
return _factory.get(*args, **kwargs)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/factory.py", line 176, in get
return self.type.fromtimestamp(arg, tzinfo=tz)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/arrow.py", line 161, in fromtimestamp
dt = datetime.fromtimestamp(float(timestamp), tzinfo)
ValueError: year 52383 is out of range
|
ValueError
|
def utcfromtimestamp(cls, timestamp):
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, in UTC time.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
"""
if not util.is_timestamp(timestamp):
raise ValueError("The provided timestamp '{}' is invalid.".format(timestamp))
timestamp = util.normalize_timestamp(float(timestamp))
dt = datetime.utcfromtimestamp(timestamp)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dateutil_tz.tzutc(),
)
|
def utcfromtimestamp(cls, timestamp):
"""Constructs an :class:`Arrow <arrow.arrow.Arrow>` object from a timestamp, in UTC time.
:param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either.
"""
if not util.is_timestamp(timestamp):
raise ValueError("The provided timestamp '{}' is invalid.".format(timestamp))
dt = datetime.utcfromtimestamp(float(timestamp))
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dateutil_tz.tzutc(),
)
|
https://github.com/arrow-py/arrow/issues/795
|
arrow.get(1590889920000.0).timestamp
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/api.py", line 21, in get
return _factory.get(*args, **kwargs)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/factory.py", line 176, in get
return self.type.fromtimestamp(arg, tzinfo=tz)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/arrow.py", line 161, in fromtimestamp
dt = datetime.fromtimestamp(float(timestamp), tzinfo)
ValueError: year 52383 is out of range
|
ValueError
|
def _build_datetime(parts):
weekdate = parts.get("weekdate")
if weekdate is not None:
# we can use strptime (%G, %V, %u) in python 3.6 but these tokens aren't available before that
year, week = int(weekdate[0]), int(weekdate[1])
if weekdate[2] is not None:
day = int(weekdate[2])
else:
# day not given, default to 1
day = 1
dt = iso_to_gregorian(year, week, day)
parts["year"] = dt.year
parts["month"] = dt.month
parts["day"] = dt.day
timestamp = parts.get("timestamp")
if timestamp is not None:
return datetime.fromtimestamp(timestamp, tz=tz.tzutc())
expanded_timestamp = parts.get("expanded_timestamp")
if expanded_timestamp is not None:
return datetime.fromtimestamp(
normalize_timestamp(expanded_timestamp),
tz=tz.tzutc(),
)
day_of_year = parts.get("day_of_year")
if day_of_year is not None:
year = parts.get("year")
month = parts.get("month")
if year is None:
raise ParserError(
"Year component is required with the DDD and DDDD tokens."
)
if month is not None:
raise ParserError(
"Month component is not allowed with the DDD and DDDD tokens."
)
date_string = "{}-{}".format(year, day_of_year)
try:
dt = datetime.strptime(date_string, "%Y-%j")
except ValueError:
raise ParserError(
"The provided day of year '{}' is invalid.".format(day_of_year)
)
parts["year"] = dt.year
parts["month"] = dt.month
parts["day"] = dt.day
am_pm = parts.get("am_pm")
hour = parts.get("hour", 0)
if am_pm == "pm" and hour < 12:
hour += 12
elif am_pm == "am" and hour == 12:
hour = 0
# Support for midnight at the end of day
if hour == 24:
if parts.get("minute", 0) != 0:
raise ParserError("Midnight at the end of day must not contain minutes")
if parts.get("second", 0) != 0:
raise ParserError("Midnight at the end of day must not contain seconds")
if parts.get("microsecond", 0) != 0:
raise ParserError(
"Midnight at the end of day must not contain microseconds"
)
hour = 0
day_increment = 1
else:
day_increment = 0
# account for rounding up to 1000000
microsecond = parts.get("microsecond", 0)
if microsecond == 1000000:
microsecond = 0
second_increment = 1
else:
second_increment = 0
increment = timedelta(days=day_increment, seconds=second_increment)
return (
datetime(
year=parts.get("year", 1),
month=parts.get("month", 1),
day=parts.get("day", 1),
hour=hour,
minute=parts.get("minute", 0),
second=parts.get("second", 0),
microsecond=microsecond,
tzinfo=parts.get("tzinfo"),
)
+ increment
)
|
def _build_datetime(parts):
weekdate = parts.get("weekdate")
if weekdate is not None:
# we can use strptime (%G, %V, %u) in python 3.6 but these tokens aren't available before that
year, week = int(weekdate[0]), int(weekdate[1])
if weekdate[2] is not None:
day = int(weekdate[2])
else:
# day not given, default to 1
day = 1
dt = iso_to_gregorian(year, week, day)
parts["year"] = dt.year
parts["month"] = dt.month
parts["day"] = dt.day
timestamp = parts.get("timestamp")
if timestamp is not None:
return datetime.fromtimestamp(timestamp, tz=tz.tzutc())
expanded_timestamp = parts.get("expanded_timestamp")
if expanded_timestamp is not None:
if expanded_timestamp > MAX_TIMESTAMP:
if expanded_timestamp < MAX_TIMESTAMP_MS:
expanded_timestamp /= 1e3
elif expanded_timestamp < MAX_TIMESTAMP_US:
expanded_timestamp /= 1e6
else:
raise ValueError(
"The specified timestamp '{}' is too large.".format(
expanded_timestamp
)
)
return datetime.fromtimestamp(expanded_timestamp, tz=tz.tzutc())
day_of_year = parts.get("day_of_year")
if day_of_year is not None:
year = parts.get("year")
month = parts.get("month")
if year is None:
raise ParserError(
"Year component is required with the DDD and DDDD tokens."
)
if month is not None:
raise ParserError(
"Month component is not allowed with the DDD and DDDD tokens."
)
date_string = "{}-{}".format(year, day_of_year)
try:
dt = datetime.strptime(date_string, "%Y-%j")
except ValueError:
raise ParserError(
"The provided day of year '{}' is invalid.".format(day_of_year)
)
parts["year"] = dt.year
parts["month"] = dt.month
parts["day"] = dt.day
am_pm = parts.get("am_pm")
hour = parts.get("hour", 0)
if am_pm == "pm" and hour < 12:
hour += 12
elif am_pm == "am" and hour == 12:
hour = 0
# Support for midnight at the end of day
if hour == 24:
if parts.get("minute", 0) != 0:
raise ParserError("Midnight at the end of day must not contain minutes")
if parts.get("second", 0) != 0:
raise ParserError("Midnight at the end of day must not contain seconds")
if parts.get("microsecond", 0) != 0:
raise ParserError(
"Midnight at the end of day must not contain microseconds"
)
hour = 0
day_increment = 1
else:
day_increment = 0
# account for rounding up to 1000000
microsecond = parts.get("microsecond", 0)
if microsecond == 1000000:
microsecond = 0
second_increment = 1
else:
second_increment = 0
increment = timedelta(days=day_increment, seconds=second_increment)
return (
datetime(
year=parts.get("year", 1),
month=parts.get("month", 1),
day=parts.get("day", 1),
hour=hour,
minute=parts.get("minute", 0),
second=parts.get("second", 0),
microsecond=microsecond,
tzinfo=parts.get("tzinfo"),
)
+ increment
)
|
https://github.com/arrow-py/arrow/issues/795
|
arrow.get(1590889920000.0).timestamp
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/api.py", line 21, in get
return _factory.get(*args, **kwargs)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/factory.py", line 176, in get
return self.type.fromtimestamp(arg, tzinfo=tz)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/arrow.py", line 161, in fromtimestamp
dt = datetime.fromtimestamp(float(timestamp), tzinfo)
ValueError: year 52383 is out of range
|
ValueError
|
def is_timestamp(value):
"""Check if value is a valid timestamp."""
if isinstance(value, bool):
return False
if not (
isinstance(value, numbers.Integral)
or isinstance(value, float)
or isinstance(value, str)
):
return False
try:
float(value)
return True
except ValueError:
return False
|
def is_timestamp(value):
"""Check if value is a valid timestamp."""
if isinstance(value, bool):
return False
if not (
isinstance(value, int) or isinstance(value, float) or isinstance(value, str)
):
return False
try:
float(value)
return True
except ValueError:
return False
|
https://github.com/arrow-py/arrow/issues/795
|
arrow.get(1590889920000.0).timestamp
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/api.py", line 21, in get
return _factory.get(*args, **kwargs)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/factory.py", line 176, in get
return self.type.fromtimestamp(arg, tzinfo=tz)
File "/Users/charlie/Library/Python/3.7/lib/python/site-packages/arrow/arrow.py", line 161, in fromtimestamp
dt = datetime.fromtimestamp(float(timestamp), tzinfo)
ValueError: year 52383 is out of range
|
ValueError
|
def humanize(self, other=None, locale="en_us", only_distance=False, granularity="auto"):
"""Returns a localized, humanized representation of a relative difference in time.
:param other: (optional) an :class:`Arrow <arrow.arrow.Arrow>` or ``datetime`` object.
Defaults to now in the current :class:`Arrow <arrow.arrow.Arrow>` object's timezone.
:param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'.
:param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part.
:param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute', 'hour', 'day', 'month' or 'year'.
Usage::
>>> earlier = arrow.utcnow().shift(hours=-2)
>>> earlier.humanize()
'2 hours ago'
>>> later = earlier.shift(hours=4)
>>> later.humanize(earlier)
'in 4 hours'
"""
locale = locales.get_locale(locale)
if other is None:
utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
dt = utc.astimezone(self._datetime.tzinfo)
elif isinstance(other, Arrow):
dt = other._datetime
elif isinstance(other, datetime):
if other.tzinfo is None:
dt = other.replace(tzinfo=self._datetime.tzinfo)
else:
dt = other.astimezone(self._datetime.tzinfo)
else:
raise TypeError()
delta = int(round(util.total_seconds(self._datetime - dt)))
sign = -1 if delta < 0 else 1
diff = abs(delta)
delta = diff
if granularity == "auto":
if diff < 10:
return locale.describe("now", only_distance=only_distance)
if diff < 45:
seconds = sign * delta
return locale.describe("seconds", seconds, only_distance=only_distance)
elif diff < 90:
return locale.describe("minute", sign, only_distance=only_distance)
elif diff < 2700:
minutes = sign * int(max(delta / 60, 2))
return locale.describe("minutes", minutes, only_distance=only_distance)
elif diff < 5400:
return locale.describe("hour", sign, only_distance=only_distance)
elif diff < 79200:
hours = sign * int(max(delta / 3600, 2))
return locale.describe("hours", hours, only_distance=only_distance)
elif diff < 129600:
return locale.describe("day", sign, only_distance=only_distance)
elif diff < 554400:
days = sign * int(max(delta / 86400, 2))
return locale.describe("days", days, only_distance=only_distance)
elif diff < 907200:
return locale.describe("week", sign, only_distance=only_distance)
elif diff < 2419200:
weeks = sign * int(max(delta / 604800, 2))
return locale.describe("weeks", weeks, only_distance=only_distance)
elif diff < 3888000:
return locale.describe("month", sign, only_distance=only_distance)
elif diff < 29808000:
self_months = self._datetime.year * 12 + self._datetime.month
other_months = dt.year * 12 + dt.month
months = sign * int(max(abs(other_months - self_months), 2))
return locale.describe("months", months, only_distance=only_distance)
elif diff < 47260800:
return locale.describe("year", sign, only_distance=only_distance)
else:
years = sign * int(max(delta / 31536000, 2))
return locale.describe("years", years, only_distance=only_distance)
else:
if granularity == "second":
delta = sign * delta
if abs(delta) < 2:
return locale.describe("now", only_distance=only_distance)
elif granularity == "minute":
delta = sign * delta / float(60)
elif granularity == "hour":
delta = sign * delta / float(60 * 60)
elif granularity == "day":
delta = sign * delta / float(60 * 60 * 24)
elif granularity == "week":
delta = sign * delta / float(60 * 60 * 24 * 7)
elif granularity == "month":
delta = sign * delta / float(60 * 60 * 24 * 30.5)
elif granularity == "year":
delta = sign * delta / float(60 * 60 * 24 * 365.25)
else:
raise AttributeError(
'Error. Could not understand your level of granularity. Please select between \
"second", "minute", "hour", "day", "week", "month" or "year"'
)
if trunc(abs(delta)) != 1:
granularity += "s"
return locale.describe(granularity, delta, only_distance=only_distance)
|
def humanize(self, other=None, locale="en_us", only_distance=False, granularity="auto"):
"""Returns a localized, humanized representation of a relative difference in time.
:param other: (optional) an :class:`Arrow <arrow.arrow.Arrow>` or ``datetime`` object.
Defaults to now in the current :class:`Arrow <arrow.arrow.Arrow>` object's timezone.
:param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'.
:param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part.
:param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute', 'hour', 'day', 'month' or 'year'.
Usage::
>>> earlier = arrow.utcnow().shift(hours=-2)
>>> earlier.humanize()
'2 hours ago'
>>> later = earlier.shift(hours=4)
>>> later.humanize(earlier)
'in 4 hours'
"""
locale = locales.get_locale(locale)
if other is None:
utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc())
dt = utc.astimezone(self._datetime.tzinfo)
elif isinstance(other, Arrow):
dt = other._datetime
elif isinstance(other, datetime):
if other.tzinfo is None:
dt = other.replace(tzinfo=self._datetime.tzinfo)
else:
dt = other.astimezone(self._datetime.tzinfo)
else:
raise TypeError()
delta = int(round(util.total_seconds(self._datetime - dt)))
sign = -1 if delta < 0 else 1
diff = abs(delta)
delta = diff
if granularity == "auto":
if diff < 10:
return locale.describe("now", only_distance=only_distance)
if diff < 45:
seconds = sign * delta
return locale.describe("seconds", seconds, only_distance=only_distance)
elif diff < 90:
return locale.describe("minute", sign, only_distance=only_distance)
elif diff < 2700:
minutes = sign * int(max(delta / 60, 2))
return locale.describe("minutes", minutes, only_distance=only_distance)
elif diff < 5400:
return locale.describe("hour", sign, only_distance=only_distance)
elif diff < 79200:
hours = sign * int(max(delta / 3600, 2))
return locale.describe("hours", hours, only_distance=only_distance)
elif diff < 129600:
return locale.describe("day", sign, only_distance=only_distance)
elif diff < 2160000:
days = sign * int(max(delta / 86400, 2))
return locale.describe("days", days, only_distance=only_distance)
elif diff < 3888000:
return locale.describe("month", sign, only_distance=only_distance)
elif diff < 29808000:
self_months = self._datetime.year * 12 + self._datetime.month
other_months = dt.year * 12 + dt.month
months = sign * int(max(abs(other_months - self_months), 2))
return locale.describe("months", months, only_distance=only_distance)
elif diff < 47260800:
return locale.describe("year", sign, only_distance=only_distance)
else:
years = sign * int(max(delta / 31536000, 2))
return locale.describe("years", years, only_distance=only_distance)
else:
if granularity == "second":
delta = sign * delta
if abs(delta) < 2:
return locale.describe("now", only_distance=only_distance)
elif granularity == "minute":
delta = sign * delta / float(60)
elif granularity == "hour":
delta = sign * delta / float(60 * 60)
elif granularity == "day":
delta = sign * delta / float(60 * 60 * 24)
elif granularity == "month":
delta = sign * delta / float(60 * 60 * 24 * 30.5)
elif granularity == "year":
delta = sign * delta / float(60 * 60 * 24 * 365.25)
else:
raise AttributeError(
'Error. Could not understand your level of granularity. Please select between \
"second", "minute", "hour", "day", "week", "month" or "year"'
)
if trunc(abs(delta)) != 1:
granularity += "s"
return locale.describe(granularity, delta, only_distance=only_distance)
|
https://github.com/arrow-py/arrow/issues/524
|
Traceback (most recent call last):
File "dates_test.py", line 4, in <module>
print arrow.get('2018-01-12T00:00:00.000-07:00').humanize(locale='en', granularity='week')
File "/Users/nathanzylbersztejn/miniconda2/envs/rasa/lib/python2.7/site-packages/arrow/arrow.py", line 805, in humanize
"second", "minute", "hour", "day", "week", "month" or "year"')
AttributeError: Error. Could not understand your level of granularity. Please select between "second", "minute", "hour", "day", "week", "month" or "year"
|
AttributeError
|
def get(self, *args, **kwargs):
"""Returns an :class:`Arrow <arrow.arrow.Arrow>` object based on flexible inputs.
:param locale: (optional) a ``str`` specifying a locale for the parser. Defaults to
'en_us'.
:param tzinfo: (optional) a :ref:`timezone expression <tz-expr>` or tzinfo object.
Replaces the timezone unless using an input form that is explicitly UTC or specifies
the timezone in a positional argument. Defaults to UTC.
Usage::
>>> import arrow
**No inputs** to get current UTC time::
>>> arrow.get()
<Arrow [2013-05-08T05:51:43.316458+00:00]>
**None** to also get current UTC time::
>>> arrow.get(None)
<Arrow [2013-05-08T05:51:49.016458+00:00]>
**One** :class:`Arrow <arrow.arrow.Arrow>` object, to get a copy.
>>> arw = arrow.utcnow()
>>> arrow.get(arw)
<Arrow [2013-10-23T15:21:54.354846+00:00]>
**One** ``str``, ``float``, or ``int``, convertible to a floating-point timestamp, to get
that timestamp in UTC::
>>> arrow.get(1367992474.293378)
<Arrow [2013-05-08T05:54:34.293378+00:00]>
>>> arrow.get(1367992474)
<Arrow [2013-05-08T05:54:34+00:00]>
>>> arrow.get('1367992474.293378')
<Arrow [2013-05-08T05:54:34.293378+00:00]>
>>> arrow.get('1367992474')
<Arrow [2013-05-08T05:54:34+00:00]>
**One** ISO-8601-formatted ``str``, to parse it::
>>> arrow.get('2013-09-29T01:26:43.830580')
<Arrow [2013-09-29T01:26:43.830580+00:00]>
**One** ``tzinfo``, to get the current time **converted** to that timezone::
>>> arrow.get(tz.tzlocal())
<Arrow [2013-05-07T22:57:28.484717-07:00]>
**One** naive ``datetime``, to get that datetime in UTC::
>>> arrow.get(datetime(2013, 5, 5))
<Arrow [2013-05-05T00:00:00+00:00]>
**One** aware ``datetime``, to get that datetime::
>>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal()))
<Arrow [2013-05-05T00:00:00-07:00]>
**One** naive ``date``, to get that date in UTC::
>>> arrow.get(date(2013, 5, 5))
<Arrow [2013-05-05T00:00:00+00:00]>
**Two** arguments, a naive or aware ``datetime``, and a replacement
:ref:`timezone expression <tz-expr>`::
>>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
**Two** arguments, a naive ``date``, and a replacement
:ref:`timezone expression <tz-expr>`::
>>> arrow.get(date(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
**Two** arguments, both ``str``, to parse the first according to the format of the second::
>>> arrow.get('2013-05-05 12:30:45 America/Chicago', 'YYYY-MM-DD HH:mm:ss ZZZ')
<Arrow [2013-05-05T12:30:45-05:00]>
**Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try::
>>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss'])
<Arrow [2013-05-05T12:30:45+00:00]>
**Three or more** arguments, as for the constructor of a ``datetime``::
>>> arrow.get(2013, 5, 5, 12, 30, 45)
<Arrow [2013-05-05T12:30:45+00:00]>
**One** time.struct time::
>>> arrow.get(gmtime(0))
<Arrow [1970-01-01T00:00:00+00:00]>
"""
arg_count = len(args)
locale = kwargs.pop("locale", "en_us")
tz = kwargs.get("tzinfo", None)
# if kwargs given, send to constructor unless only tzinfo provided
if len(kwargs) > 1:
arg_count = 3
# tzinfo kwarg is not provided
if len(kwargs) == 1 and tz is None:
arg_count = 3
# () -> now, @ utc.
if arg_count == 0:
if isinstance(tz, tzinfo):
return self.type.now(tz)
return self.type.utcnow()
if arg_count == 1:
arg = args[0]
# (None) -> now, @ utc.
if arg is None:
return self.type.utcnow()
# try (int, float, str(int), str(float)) -> utc, from timestamp.
if is_timestamp(arg):
return self.type.utcfromtimestamp(arg)
# (Arrow) -> from the object's datetime.
if isinstance(arg, Arrow):
return self.type.fromdatetime(arg.datetime)
# (datetime) -> from datetime.
if isinstance(arg, datetime):
return self.type.fromdatetime(arg)
# (date) -> from date.
if isinstance(arg, date):
return self.type.fromdate(arg)
# (tzinfo) -> now, @ tzinfo.
elif isinstance(arg, tzinfo):
return self.type.now(arg)
# (str) -> parse.
elif isstr(arg):
warnings.warn(
"The .get() parsing method without a format string will parse more strictly in version 0.15.0."
"See https://github.com/crsmithdev/arrow/issues/612 for more details.",
ArrowParseWarning,
)
dt = parser.DateTimeParser(locale).parse_iso(arg)
return self.type.fromdatetime(dt, tz)
# (struct_time) -> from struct_time
elif isinstance(arg, struct_time):
return self.type.utcfromtimestamp(calendar.timegm(arg))
else:
raise TypeError(
"Can't parse single argument type of '{}'".format(type(arg))
)
elif arg_count == 2:
arg_1, arg_2 = args[0], args[1]
if isinstance(arg_1, datetime):
# (datetime, tzinfo/str) -> fromdatetime replace tzinfo.
if isinstance(arg_2, tzinfo) or isstr(arg_2):
return self.type.fromdatetime(arg_1, arg_2)
else:
raise TypeError(
"Can't parse two arguments of types 'datetime', '{}'".format(
type(arg_2)
)
)
elif isinstance(arg_1, date):
# (date, tzinfo/str) -> fromdate replace tzinfo.
if isinstance(arg_2, tzinfo) or isstr(arg_2):
return self.type.fromdate(arg_1, tzinfo=arg_2)
else:
raise TypeError(
"Can't parse two arguments of types 'date', '{}'".format(
type(arg_2)
)
)
# (str, format) -> parse.
elif isstr(arg_1) and (isstr(arg_2) or isinstance(arg_2, list)):
warnings.warn(
"The .get() parsing method with a format string will parse more strictly in version 0.15.0."
"See https://github.com/crsmithdev/arrow/issues/612 for more details.",
ArrowParseWarning,
)
dt = parser.DateTimeParser(locale).parse(args[0], args[1])
return self.type.fromdatetime(dt, tzinfo=tz)
else:
raise TypeError(
"Can't parse two arguments of types '{}', '{}'".format(
type(arg_1), type(arg_2)
)
)
# 3+ args -> datetime-like via constructor.
else:
return self.type(*args, **kwargs)
|
def get(self, *args, **kwargs):
"""Returns an :class:`Arrow <arrow.arrow.Arrow>` object based on flexible inputs.
:param locale: (optional) a ``str`` specifying a locale for the parser. Defaults to
'en_us'.
:param tzinfo: (optional) a :ref:`timezone expression <tz-expr>` or tzinfo object.
Replaces the timezone unless using an input form that is explicitly UTC or specifies
the timezone in a positional argument. Defaults to UTC.
Usage::
>>> import arrow
**No inputs** to get current UTC time::
>>> arrow.get()
<Arrow [2013-05-08T05:51:43.316458+00:00]>
**None** to also get current UTC time::
>>> arrow.get(None)
<Arrow [2013-05-08T05:51:49.016458+00:00]>
**One** :class:`Arrow <arrow.arrow.Arrow>` object, to get a copy.
>>> arw = arrow.utcnow()
>>> arrow.get(arw)
<Arrow [2013-10-23T15:21:54.354846+00:00]>
**One** ``str``, ``float``, or ``int``, convertible to a floating-point timestamp, to get
that timestamp in UTC::
>>> arrow.get(1367992474.293378)
<Arrow [2013-05-08T05:54:34.293378+00:00]>
>>> arrow.get(1367992474)
<Arrow [2013-05-08T05:54:34+00:00]>
>>> arrow.get('1367992474.293378')
<Arrow [2013-05-08T05:54:34.293378+00:00]>
>>> arrow.get('1367992474')
<Arrow [2013-05-08T05:54:34+00:00]>
**One** ISO-8601-formatted ``str``, to parse it::
>>> arrow.get('2013-09-29T01:26:43.830580')
<Arrow [2013-09-29T01:26:43.830580+00:00]>
**One** ``tzinfo``, to get the current time **converted** to that timezone::
>>> arrow.get(tz.tzlocal())
<Arrow [2013-05-07T22:57:28.484717-07:00]>
**One** naive ``datetime``, to get that datetime in UTC::
>>> arrow.get(datetime(2013, 5, 5))
<Arrow [2013-05-05T00:00:00+00:00]>
**One** aware ``datetime``, to get that datetime::
>>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal()))
<Arrow [2013-05-05T00:00:00-07:00]>
**One** naive ``date``, to get that date in UTC::
>>> arrow.get(date(2013, 5, 5))
<Arrow [2013-05-05T00:00:00+00:00]>
**Two** arguments, a naive or aware ``datetime``, and a replacement
:ref:`timezone expression <tz-expr>`::
>>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
**Two** arguments, a naive ``date``, and a replacement
:ref:`timezone expression <tz-expr>`::
>>> arrow.get(date(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
**Two** arguments, both ``str``, to parse the first according to the format of the second::
>>> arrow.get('2013-05-05 12:30:45 America/Chicago', 'YYYY-MM-DD HH:mm:ss ZZZ')
<Arrow [2013-05-05T12:30:45-05:00]>
**Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try::
>>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss'])
<Arrow [2013-05-05T12:30:45+00:00]>
**Three or more** arguments, as for the constructor of a ``datetime``::
>>> arrow.get(2013, 5, 5, 12, 30, 45)
<Arrow [2013-05-05T12:30:45+00:00]>
**One** time.struct time::
>>> arrow.get(gmtime(0))
<Arrow [1970-01-01T00:00:00+00:00]>
"""
arg_count = len(args)
locale = kwargs.get("locale", "en_us")
tz = kwargs.get("tzinfo", None)
# if kwargs given, send to constructor unless only tzinfo provided
if len(kwargs) > 1:
arg_count = 3
# tzinfo kwarg is not provided
if len(kwargs) == 1 and tz is None:
arg_count = 3
# () -> now, @ utc.
if arg_count == 0:
if isinstance(tz, tzinfo):
return self.type.now(tz)
return self.type.utcnow()
if arg_count == 1:
arg = args[0]
# (None) -> now, @ utc.
if arg is None:
return self.type.utcnow()
# try (int, float, str(int), str(float)) -> utc, from timestamp.
if is_timestamp(arg):
return self.type.utcfromtimestamp(arg)
# (Arrow) -> from the object's datetime.
if isinstance(arg, Arrow):
return self.type.fromdatetime(arg.datetime)
# (datetime) -> from datetime.
if isinstance(arg, datetime):
return self.type.fromdatetime(arg)
# (date) -> from date.
if isinstance(arg, date):
return self.type.fromdate(arg)
# (tzinfo) -> now, @ tzinfo.
elif isinstance(arg, tzinfo):
return self.type.now(arg)
# (str) -> parse.
elif isstr(arg):
warnings.warn(
"The .get() parsing method without a format string will parse more strictly in version 0.15.0."
"See https://github.com/crsmithdev/arrow/issues/612 for more details.",
ArrowParseWarning,
)
dt = parser.DateTimeParser(locale).parse_iso(arg)
return self.type.fromdatetime(dt, tz)
# (struct_time) -> from struct_time
elif isinstance(arg, struct_time):
return self.type.utcfromtimestamp(calendar.timegm(arg))
else:
raise TypeError(
"Can't parse single argument type of '{}'".format(type(arg))
)
elif arg_count == 2:
arg_1, arg_2 = args[0], args[1]
if isinstance(arg_1, datetime):
# (datetime, tzinfo/str) -> fromdatetime replace tzinfo.
if isinstance(arg_2, tzinfo) or isstr(arg_2):
return self.type.fromdatetime(arg_1, arg_2)
else:
raise TypeError(
"Can't parse two arguments of types 'datetime', '{}'".format(
type(arg_2)
)
)
elif isinstance(arg_1, date):
# (date, tzinfo/str) -> fromdate replace tzinfo.
if isinstance(arg_2, tzinfo) or isstr(arg_2):
return self.type.fromdate(arg_1, tzinfo=arg_2)
else:
raise TypeError(
"Can't parse two arguments of types 'date', '{}'".format(
type(arg_2)
)
)
# (str, format) -> parse.
elif isstr(arg_1) and (isstr(arg_2) or isinstance(arg_2, list)):
warnings.warn(
"The .get() parsing method with a format string will parse more strictly in version 0.15.0."
"See https://github.com/crsmithdev/arrow/issues/612 for more details.",
ArrowParseWarning,
)
dt = parser.DateTimeParser(locale).parse(args[0], args[1])
return self.type.fromdatetime(dt, tzinfo=tz)
else:
raise TypeError(
"Can't parse two arguments of types '{}', '{}'".format(
type(arg_1), type(arg_2)
)
)
# 3+ args -> datetime-like via constructor.
else:
return self.type(*args, **kwargs)
|
https://github.com/arrow-py/arrow/issues/630
|
In [1]: import arrow
In [2]: arrow.get("2010", "YYYY", locale="fr_FR")
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-2-a09819f3b7b0> in <module>
----> 1 arrow.get("2010", "YYYY", locale="fr_FR")
lib/python3.6/site-packages/arrow/api.py in get(*args, **kwargs)
19 """
20
---> 21 return _factory.get(*args, **kwargs)
22
23
lib/python3.6/site-packages/arrow/factory.py in get(self, *args, **kwargs)
261 # 3+ args -> datetime-like via constructor.
262 else:
--> 263 return self.type(*args, **kwargs)
264
265 def utcnow(self):
TypeError: __init__() got an unexpected keyword argument 'locale'
In [3]: arrow.__version__
Out[3]: '0.14.4'
|
TypeError
|
def build_embeddings(opt, word_field, feat_fields, for_encoder=True):
"""
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
word_padding_idx = word_field.vocab.stoi[word_field.pad_token]
num_word_embeddings = len(word_field.vocab)
feat_pad_indices = [ff.vocab.stoi[ff.pad_token] for ff in feat_fields]
num_feat_embeddings = [len(ff.vocab) for ff in feat_fields]
fix_word_vecs = opt.fix_word_vecs_enc if for_encoder else opt.fix_word_vecs_dec
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
fix_word_vecs=fix_word_vecs,
)
return emb
|
def build_embeddings(opt, word_field, feat_fields, for_encoder=True):
"""
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
word_padding_idx = word_field.vocab.stoi[word_field.pad_token]
num_word_embeddings = len(word_field.vocab)
feat_pad_indices = [ff.vocab.stoi[ff.pad_token] for ff in feat_fields]
num_feat_embeddings = [len(ff.vocab) for ff in feat_fields]
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
)
return emb
|
https://github.com/OpenNMT/OpenNMT-py/issues/950
|
Traceback (most recent call last):
File "train.py", line 40, in <module>
main(opt)
File "train.py", line 27, in main
single_main(opt)
File "/home/rudra/OpenNMT-py/onmt/train_single.py", line 113, in main
optim = build_optim(model, opt, checkpoint)
File "/home/rudra/OpenNMT-py/onmt/utils/optimizers.py", line 62, in build_optim
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
File "/home/rudra/miniconda3/envs/py36/lib/python3.6/site-packages/torch/optim/optimizer.py", line 107, in load_state_dict
raise ValueError("loaded state dict contains a parameter group "
ValueError: loaded state dict contains a parameter group that doesn't match the size of optimizer's group
|
ValueError
|
def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], (
"Unsupported model type %s" % model_opt.model_type
)
# for backward compatibility
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
# Build encoder.
if model_opt.model_type == "text":
src_fields = [f for n, f in fields["src"]]
src_emb = build_embeddings(model_opt, src_fields[0], src_fields[1:])
encoder = build_encoder(model_opt, src_emb)
elif model_opt.model_type == "img":
# why is build_encoder not used here?
# why is the model_opt.__dict__ check necessary?
if "image_channel_size" not in model_opt.__dict__:
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(
model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size,
)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(
model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size,
)
# Build decoder.
tgt_fields = [f for n, f in fields["tgt"]]
tgt_emb = build_embeddings(
model_opt, tgt_fields[0], tgt_fields[1:], for_encoder=False
)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert src_fields[0].vocab == tgt_fields[0].vocab, (
"preprocess with -share_vocab if you use share_embeddings"
)
tgt_emb.word_lut.weight = src_emb.word_lut.weight
decoder = build_decoder(model_opt, tgt_emb)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"][0][1].vocab)), gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
vocab_size = len(fields["tgt"][0][1].vocab)
pad_idx = fields["tgt"][0][1].vocab.stoi[fields["tgt"][0][1].pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
# This preserves backward-compat for models using customed layernorm
def fix_key(s):
s = re.sub(r"(.*)\.layer_norm((_\d+)?)\.b_2", r"\1.layer_norm\2.bias", s)
s = re.sub(r"(.*)\.layer_norm((_\d+)?)\.a_2", r"\1.layer_norm\2.weight", s)
return s
checkpoint["model"] = {fix_key(k): v for k, v in checkpoint["model"].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint["model"], strict=False)
generator.load_state_dict(checkpoint["generator"], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, "embeddings"):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc
)
if hasattr(model.decoder, "embeddings"):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec
)
model.generator = generator
model.to(device)
return model
|
def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], (
"Unsupported model type %s" % model_opt.model_type
)
# for backward compatibility
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
# Build encoder.
if model_opt.model_type == "text":
src_fields = [f for n, f in fields["src"]]
src_emb = build_embeddings(model_opt, src_fields[0], src_fields[1:])
encoder = build_encoder(model_opt, src_emb)
elif model_opt.model_type == "img":
# why is build_encoder not used here?
# why is the model_opt.__dict__ check necessary?
if "image_channel_size" not in model_opt.__dict__:
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(
model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size,
)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(
model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size,
)
# Build decoder.
tgt_fields = [f for n, f in fields["tgt"]]
tgt_emb = build_embeddings(
model_opt, tgt_fields[0], tgt_fields[1:], for_encoder=False
)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert src_fields[0].vocab == tgt_fields[0].vocab, (
"preprocess with -share_vocab if you use share_embeddings"
)
tgt_emb.word_lut.weight = src_emb.word_lut.weight
decoder = build_decoder(model_opt, tgt_emb)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"][0][1].vocab)), gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
vocab_size = len(fields["tgt"][0][1].vocab)
pad_idx = fields["tgt"][0][1].vocab.stoi[fields["tgt"][0][1].pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
# This preserves backward-compat for models using customed layernorm
def fix_key(s):
s = re.sub(r"(.*)\.layer_norm((_\d+)?)\.b_2", r"\1.layer_norm\2.bias", s)
s = re.sub(r"(.*)\.layer_norm((_\d+)?)\.a_2", r"\1.layer_norm\2.weight", s)
return s
checkpoint["model"] = {fix_key(k): v for k, v in checkpoint["model"].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint["model"], strict=False)
generator.load_state_dict(checkpoint["generator"], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, "embeddings"):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc
)
if hasattr(model.decoder, "embeddings"):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec
)
model.generator = generator
model.to(device)
return model
|
https://github.com/OpenNMT/OpenNMT-py/issues/950
|
Traceback (most recent call last):
File "train.py", line 40, in <module>
main(opt)
File "train.py", line 27, in main
single_main(opt)
File "/home/rudra/OpenNMT-py/onmt/train_single.py", line 113, in main
optim = build_optim(model, opt, checkpoint)
File "/home/rudra/OpenNMT-py/onmt/utils/optimizers.py", line 62, in build_optim
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
File "/home/rudra/miniconda3/envs/py36/lib/python3.6/site-packages/torch/optim/optimizer.py", line 107, in load_state_dict
raise ValueError("loaded state dict contains a parameter group "
ValueError: loaded state dict contains a parameter group that doesn't match the size of optimizer's group
|
ValueError
|
def __init__(
self,
word_vec_size,
word_vocab_size,
word_padding_idx,
position_encoding=False,
feat_merge="concat",
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=0,
sparse=False,
fix_word_vecs=False,
):
if feat_padding_idx is None:
feat_padding_idx = []
self.word_padding_idx = word_padding_idx
self.word_vec_size = word_vec_size
# Dimensions and padding for constructing the word embedding matrix
vocab_sizes = [word_vocab_size]
emb_dims = [word_vec_size]
pad_indices = [word_padding_idx]
# Dimensions and padding for feature embedding matrices
# (these have no effect if feat_vocab_sizes is empty)
if feat_merge == "sum":
feat_dims = [word_vec_size] * len(feat_vocab_sizes)
elif feat_vec_size > 0:
feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
else:
feat_dims = [int(vocab**feat_vec_exponent) for vocab in feat_vocab_sizes]
vocab_sizes.extend(feat_vocab_sizes)
emb_dims.extend(feat_dims)
pad_indices.extend(feat_padding_idx)
# The embedding matrix look-up tables. The first look-up table
# is for words. Subsequent ones are for features, if any exist.
emb_params = zip(vocab_sizes, emb_dims, pad_indices)
embeddings = [
nn.Embedding(vocab, dim, padding_idx=pad, sparse=sparse)
for vocab, dim, pad in emb_params
]
emb_luts = Elementwise(feat_merge, embeddings)
# The final output size of word + feature vectors. This can vary
# from the word vector size if and only if features are defined.
# This is the attribute you should access if you need to know
# how big your embeddings are going to be.
self.embedding_size = sum(emb_dims) if feat_merge == "concat" else word_vec_size
# The sequence of operations that converts the input sequence
# into a sequence of embeddings. At minimum this consists of
# looking up the embeddings for each word and feature in the
# input. Model parameters may require the sequence to contain
# additional operations as well.
super(Embeddings, self).__init__()
self.make_embedding = nn.Sequential()
self.make_embedding.add_module("emb_luts", emb_luts)
if feat_merge == "mlp" and len(feat_vocab_sizes) > 0:
in_dim = sum(emb_dims)
out_dim = word_vec_size
mlp = nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU())
self.make_embedding.add_module("mlp", mlp)
self.position_encoding = position_encoding
if self.position_encoding:
pe = PositionalEncoding(dropout, self.embedding_size)
self.make_embedding.add_module("pe", pe)
if fix_word_vecs:
self.word_lut.weight.requires_grad = False
|
def __init__(
self,
word_vec_size,
word_vocab_size,
word_padding_idx,
position_encoding=False,
feat_merge="concat",
feat_vec_exponent=0.7,
feat_vec_size=-1,
feat_padding_idx=[],
feat_vocab_sizes=[],
dropout=0,
sparse=False,
):
if feat_padding_idx is None:
feat_padding_idx = []
self.word_padding_idx = word_padding_idx
self.word_vec_size = word_vec_size
# Dimensions and padding for constructing the word embedding matrix
vocab_sizes = [word_vocab_size]
emb_dims = [word_vec_size]
pad_indices = [word_padding_idx]
# Dimensions and padding for feature embedding matrices
# (these have no effect if feat_vocab_sizes is empty)
if feat_merge == "sum":
feat_dims = [word_vec_size] * len(feat_vocab_sizes)
elif feat_vec_size > 0:
feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
else:
feat_dims = [int(vocab**feat_vec_exponent) for vocab in feat_vocab_sizes]
vocab_sizes.extend(feat_vocab_sizes)
emb_dims.extend(feat_dims)
pad_indices.extend(feat_padding_idx)
# The embedding matrix look-up tables. The first look-up table
# is for words. Subsequent ones are for features, if any exist.
emb_params = zip(vocab_sizes, emb_dims, pad_indices)
embeddings = [
nn.Embedding(vocab, dim, padding_idx=pad, sparse=sparse)
for vocab, dim, pad in emb_params
]
emb_luts = Elementwise(feat_merge, embeddings)
# The final output size of word + feature vectors. This can vary
# from the word vector size if and only if features are defined.
# This is the attribute you should access if you need to know
# how big your embeddings are going to be.
self.embedding_size = sum(emb_dims) if feat_merge == "concat" else word_vec_size
# The sequence of operations that converts the input sequence
# into a sequence of embeddings. At minimum this consists of
# looking up the embeddings for each word and feature in the
# input. Model parameters may require the sequence to contain
# additional operations as well.
super(Embeddings, self).__init__()
self.make_embedding = nn.Sequential()
self.make_embedding.add_module("emb_luts", emb_luts)
if feat_merge == "mlp" and len(feat_vocab_sizes) > 0:
in_dim = sum(emb_dims)
out_dim = word_vec_size
mlp = nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU())
self.make_embedding.add_module("mlp", mlp)
self.position_encoding = position_encoding
if self.position_encoding:
pe = PositionalEncoding(dropout, self.embedding_size)
self.make_embedding.add_module("pe", pe)
|
https://github.com/OpenNMT/OpenNMT-py/issues/950
|
Traceback (most recent call last):
File "train.py", line 40, in <module>
main(opt)
File "train.py", line 27, in main
single_main(opt)
File "/home/rudra/OpenNMT-py/onmt/train_single.py", line 113, in main
optim = build_optim(model, opt, checkpoint)
File "/home/rudra/OpenNMT-py/onmt/utils/optimizers.py", line 62, in build_optim
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
File "/home/rudra/miniconda3/envs/py36/lib/python3.6/site-packages/torch/optim/optimizer.py", line 107, in load_state_dict
raise ValueError("loaded state dict contains a parameter group "
ValueError: loaded state dict contains a parameter group that doesn't match the size of optimizer's group
|
ValueError
|
def load_pretrained_vectors(self, emb_file):
"""Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
fixed (bool) : if true, embeddings are not updated
"""
if emb_file:
pretrained = torch.load(emb_file)
pretrained_vec_size = pretrained.size(1)
if self.word_vec_size > pretrained_vec_size:
self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
elif self.word_vec_size < pretrained_vec_size:
self.word_lut.weight.data.copy_(pretrained[:, : self.word_vec_size])
else:
self.word_lut.weight.data.copy_(pretrained)
|
def load_pretrained_vectors(self, emb_file, fixed):
"""Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
fixed (bool) : if true, embeddings are not updated
"""
if emb_file:
pretrained = torch.load(emb_file)
pretrained_vec_size = pretrained.size(1)
if self.word_vec_size > pretrained_vec_size:
self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
elif self.word_vec_size < pretrained_vec_size:
self.word_lut.weight.data.copy_(pretrained[:, : self.word_vec_size])
else:
self.word_lut.weight.data.copy_(pretrained)
if fixed:
self.word_lut.weight.requires_grad = False
|
https://github.com/OpenNMT/OpenNMT-py/issues/950
|
Traceback (most recent call last):
File "train.py", line 40, in <module>
main(opt)
File "train.py", line 27, in main
single_main(opt)
File "/home/rudra/OpenNMT-py/onmt/train_single.py", line 113, in main
optim = build_optim(model, opt, checkpoint)
File "/home/rudra/OpenNMT-py/onmt/utils/optimizers.py", line 62, in build_optim
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
File "/home/rudra/miniconda3/envs/py36/lib/python3.6/site-packages/torch/optim/optimizer.py", line 107, in load_state_dict
raise ValueError("loaded state dict contains a parameter group "
ValueError: loaded state dict contains a parameter group that doesn't match the size of optimizer's group
|
ValueError
|
def __iter__(self):
"""
Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals to or approximates `self.shard_size`.
"""
iteration_index = -1
if self.assoc_iter is not None:
# We have associate iterator, just yields tuples
# util we run parallel with it.
while self.line_index < self.assoc_iter.line_index:
line = self.corpus.readline()
if line == "":
raise AssertionError("Two corpuses must have same number of lines!")
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
if self.assoc_iter.eof:
self.eof = True
self.corpus.close()
else:
# Yield tuples util this shard's size reaches the threshold.
self.corpus.seek(self.last_pos)
while True:
if self.shard_size != 0 and self.line_index % 64 == 0:
# This part of check is time consuming on Py2 (but
# it is quite fast on Py3, weird!). So we don't bother
# to check for very line. Instead we chekc every 64
# lines. Thus we are not dividing exactly per
# `shard_size`, but it is not too much difference.
cur_pos = self.corpus.tell()
if cur_pos >= self.last_pos + self.shard_size:
self.last_pos = cur_pos
return
line = self.corpus.readline()
if line == "":
self.eof = True
self.corpus.close()
return
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
|
def __iter__(self):
"""
Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals to or approximates `self.shard_size`.
"""
iteration_index = -1
if self.assoc_iter is not None:
# We have associate iterator, just yields tuples
# util we run parallel with it.
while self.line_index < self.assoc_iter.line_index:
line = self.corpus.readline()
if line == "":
raise AssertionError("Two corpuses must have same number of lines!")
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
if self.assoc_iter.eof:
self.eof = True
self.corpus.close()
else:
# Yield tuples util this shard's size reaches the threshold.
self.corpus.seek(self.last_pos)
while True:
if self.shard_size != 0 and self.line_index % 64 == 0:
# This part of check is time consuming on Py2 (but
# it is quite fast on Py3, weird!). So we don't bother
# to check for very line. Instead we chekc every 64
# lines. Thus we are not dividing exactly per
# `shard_size`, but it is not too much difference.
cur_pos = self.corpus.tell()
if cur_pos >= self.last_pos + self.shard_size:
self.last_pos = cur_pos
raise StopIteration
line = self.corpus.readline()
if line == "":
self.eof = True
self.corpus.close()
raise StopIteration
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
|
https://github.com/OpenNMT/OpenNMT-py/issues/911
|
Traceback (most recent call last):
File "/mnt/cephfs2/asr/users/zewei.chu/SQuAD/round-trip-translation/OpenNMT-py/onmt/inputters/text_dataset.py", line 384, in __iter__
raise StopIteration
StopIteration
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "preprocess.py", line 211, in <module>
main()
File "preprocess.py", line 201, in main
train_dataset_files = build_save_dataset('train', fields, opt)
File "preprocess.py", line 138, in build_save_dataset
corpus_type, opt)
File "preprocess.py", line 107, in build_save_in_shards
dynamic_dict=opt.dynamic_dict)
File "/mnt/cephfs2/asr/users/zewei.chu/SQuAD/round-trip-translation/OpenNMT-py/onmt/inputters/text_dataset.py", line 79, in __init__
for ex_values in example_values:
File "/mnt/cephfs2/asr/users/zewei.chu/SQuAD/round-trip-translation/OpenNMT-py/onmt/inputters/text_dataset.py", line 71, in <genexpr>
example_values = ([ex[k] for k in keys] for ex in examples_iter)
File "/mnt/cephfs2/asr/users/zewei.chu/SQuAD/round-trip-translation/OpenNMT-py/onmt/inputters/text_dataset.py", line 57, in <genexpr>
examples_iter = (self._join_dicts(src, tgt) for src, tgt in
RuntimeError: generator raised StopIteration
|
RuntimeError
|
def make_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img"], "Unsupported model type %s" % (
model_opt.model_type
)
# Make encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = onmt.IO.collect_feature_dicts(fields, "src")
src_embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
encoder = make_encoder(model_opt, src_embeddings)
else:
encoder = ImageEncoder(
model_opt.layers, model_opt.brnn, model_opt.rnn_size, model_opt.dropout
)
# Make decoder.
tgt_dict = fields["tgt"].vocab
# TODO: prepare for a future where tgt features are possible.
feature_dicts = onmt.IO.collect_feature_dicts(fields, "tgt")
tgt_embeddings = make_embeddings(
model_opt, tgt_dict, feature_dicts, for_encoder=False
)
# Share the embedding matrix - preprocess with share_vocab required
if model_opt.share_embeddings:
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = make_decoder(model_opt, tgt_embeddings)
# Make NMTModel(= encoder + decoder).
model = NMTModel(encoder, decoder)
model.opt = model_opt
# Make Generator.
if not model_opt.copy_attn:
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)), nn.LogSoftmax()
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt, fields["src"].vocab, fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
print("Loading model parameters.")
model.load_state_dict(checkpoint["model"])
generator.load_state_dict(checkpoint["generator"])
else:
if model_opt.param_init != 0.0:
print("Intializing model parameters.")
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc
)
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec
)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
# Make the whole model leverage GPU if indicated to do so.
if gpu:
model.cuda()
else:
model.cpu()
return model
|
def make_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img"], "Unsupported model type %s" % (
model_opt.model_type
)
# Make encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = onmt.IO.collect_feature_dicts(fields, "src")
src_embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
encoder = make_encoder(model_opt, src_embeddings)
else:
encoder = ImageEncoder(
model_opt.layers, model_opt.brnn, model_opt.rnn_size, model_opt.dropout
)
# Make decoder.
tgt_dict = fields["tgt"].vocab
# TODO: prepare for a future where tgt features are possible.
feature_dicts = onmt.IO.collect_feature_dicts(fields, "tgt")
tgt_embeddings = make_embeddings(
model_opt, tgt_dict, feature_dicts, for_encoder=False
)
# Share the embedding matrix - preprocess with share_vocab required
if model_opt.share_embeddings:
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = make_decoder(model_opt, tgt_embeddings)
# Make NMTModel(= encoder + decoder).
model = NMTModel(encoder, decoder)
# Make Generator.
if not model_opt.copy_attn:
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)), nn.LogSoftmax()
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt, fields["src"].vocab, fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
print("Loading model parameters.")
model.load_state_dict(checkpoint["model"])
generator.load_state_dict(checkpoint["generator"])
else:
if model_opt.param_init != 0.0:
print("Intializing model parameters.")
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc
)
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec
)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
# Make the whole model leverage GPU if indicated to do so.
if gpu:
model.cuda()
else:
model.cpu()
return model
|
https://github.com/OpenNMT/OpenNMT-py/issues/394
|
./run.sh translate
[...]
Loading model parameters.
While copying the parameter named encoder.embeddings.make_embedding.emb_luts.0.weight, whose dimensions in the model are torch.Size([22, 500]) and whose dimensions in the checkpoint are torch.Size([22, 100]), ...
Traceback (most recent call last):
File "translate.py", line 133, in <module>
main()
File "translate.py", line 55, in main
translator = onmt.Translator(opt, dummy_opt.__dict__)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/Translator.py", line 29, in __init__
model_opt, self.fields, use_gpu(opt), checkpoint)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/ModelConstructor.py", line 166, in make_base_model
model.load_state_dict(checkpoint['model'])
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 360, in load_state_dict
own_state[name].copy_(param)
RuntimeError: inconsistent tensor size, expected tensor [22 x 500] and src [22 x 100] to have the same number of elements, but got 11000 and 2200 elements respectively at /home/pltrdy/pytorch/torch/lib/TH/generic/THTensorCopy.c:86
|
RuntimeError
|
def train_model(model, train_data, valid_data, fields, optim):
train_iter = make_train_data_iter(train_data, opt)
valid_iter = make_valid_data_iter(valid_data, opt)
train_loss = make_loss_compute(model, fields["tgt"].vocab, train_data, opt)
valid_loss = make_loss_compute(model, fields["tgt"].vocab, valid_data, opt)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
trainer = onmt.Trainer(
model,
train_iter,
valid_iter,
train_loss,
valid_loss,
optim,
trunc_size,
shard_size,
)
for epoch in range(opt.start_epoch, opt.epochs + 1):
print("")
# 1. Train for one epoch on the training set.
train_stats = trainer.train(epoch, report_func)
print("Train perplexity: %g" % train_stats.ppl())
print("Train accuracy: %g" % train_stats.accuracy())
# 2. Validate on the validation set.
valid_stats = trainer.validate()
print("Validation perplexity: %g" % valid_stats.ppl())
print("Validation accuracy: %g" % valid_stats.accuracy())
# 3. Log to remote server.
if opt.exp_host:
train_stats.log("train", experiment, optim.lr)
valid_stats.log("valid", experiment, optim.lr)
# 4. Update the learning rate
trainer.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
trainer.drop_checkpoint(model.opt, epoch, fields, valid_stats)
|
def train_model(model, train_data, valid_data, fields, optim):
train_iter = make_train_data_iter(train_data, opt)
valid_iter = make_valid_data_iter(valid_data, opt)
train_loss = make_loss_compute(model, fields["tgt"].vocab, train_data, opt)
valid_loss = make_loss_compute(model, fields["tgt"].vocab, valid_data, opt)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
trainer = onmt.Trainer(
model,
train_iter,
valid_iter,
train_loss,
valid_loss,
optim,
trunc_size,
shard_size,
)
for epoch in range(opt.start_epoch, opt.epochs + 1):
print("")
# 1. Train for one epoch on the training set.
train_stats = trainer.train(epoch, report_func)
print("Train perplexity: %g" % train_stats.ppl())
print("Train accuracy: %g" % train_stats.accuracy())
# 2. Validate on the validation set.
valid_stats = trainer.validate()
print("Validation perplexity: %g" % valid_stats.ppl())
print("Validation accuracy: %g" % valid_stats.accuracy())
# 3. Log to remote server.
if opt.exp_host:
train_stats.log("train", experiment, optim.lr)
valid_stats.log("valid", experiment, optim.lr)
# 4. Update the learning rate
trainer.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
trainer.drop_checkpoint(opt, epoch, fields, valid_stats)
|
https://github.com/OpenNMT/OpenNMT-py/issues/394
|
./run.sh translate
[...]
Loading model parameters.
While copying the parameter named encoder.embeddings.make_embedding.emb_luts.0.weight, whose dimensions in the model are torch.Size([22, 500]) and whose dimensions in the checkpoint are torch.Size([22, 100]), ...
Traceback (most recent call last):
File "translate.py", line 133, in <module>
main()
File "translate.py", line 55, in main
translator = onmt.Translator(opt, dummy_opt.__dict__)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/Translator.py", line 29, in __init__
model_opt, self.fields, use_gpu(opt), checkpoint)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/ModelConstructor.py", line 166, in make_base_model
model.load_state_dict(checkpoint['model'])
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 360, in load_state_dict
own_state[name].copy_(param)
RuntimeError: inconsistent tensor size, expected tensor [22 x 500] and src [22 x 100] to have the same number of elements, but got 11000 and 2200 elements respectively at /home/pltrdy/pytorch/torch/lib/TH/generic/THTensorCopy.c:86
|
RuntimeError
|
def train_model(model, train, valid, fields, optim, model_opt):
train_iter = make_train_data_iter(train, opt)
valid_iter = make_valid_data_iter(valid, opt)
train_loss = make_loss_compute(model, fields["tgt"].vocab, train, opt)
valid_loss = make_loss_compute(model, fields["tgt"].vocab, valid, opt)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
trainer = onmt.Trainer(
model,
train_iter,
valid_iter,
train_loss,
valid_loss,
optim,
trunc_size,
shard_size,
train.data_type,
)
for epoch in range(opt.start_epoch, opt.epochs + 1):
print("")
# 1. Train for one epoch on the training set.
train_stats = trainer.train(epoch, report_func)
print("Train perplexity: %g" % train_stats.ppl())
print("Train accuracy: %g" % train_stats.accuracy())
# 2. Validate on the validation set.
valid_stats = trainer.validate()
print("Validation perplexity: %g" % valid_stats.ppl())
print("Validation accuracy: %g" % valid_stats.accuracy())
# 3. Log to remote server.
if opt.exp_host:
train_stats.log("train", experiment, optim.lr)
valid_stats.log("valid", experiment, optim.lr)
# 4. Update the learning rate
trainer.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
trainer.drop_checkpoint(model_opt, epoch, fields, valid_stats)
|
def train_model(model, train, valid, fields, optim):
train_iter = make_train_data_iter(train, opt)
valid_iter = make_valid_data_iter(valid, opt)
train_loss = make_loss_compute(model, fields["tgt"].vocab, train, opt)
valid_loss = make_loss_compute(model, fields["tgt"].vocab, valid, opt)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
trainer = onmt.Trainer(
model,
train_iter,
valid_iter,
train_loss,
valid_loss,
optim,
trunc_size,
shard_size,
train.data_type,
)
for epoch in range(opt.start_epoch, opt.epochs + 1):
print("")
# 1. Train for one epoch on the training set.
train_stats = trainer.train(epoch, report_func)
print("Train perplexity: %g" % train_stats.ppl())
print("Train accuracy: %g" % train_stats.accuracy())
# 2. Validate on the validation set.
valid_stats = trainer.validate()
print("Validation perplexity: %g" % valid_stats.ppl())
print("Validation accuracy: %g" % valid_stats.accuracy())
# 3. Log to remote server.
if opt.exp_host:
train_stats.log("train", experiment, optim.lr)
valid_stats.log("valid", experiment, optim.lr)
# 4. Update the learning rate
trainer.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
trainer.drop_checkpoint(opt, epoch, fields, valid_stats)
|
https://github.com/OpenNMT/OpenNMT-py/issues/394
|
./run.sh translate
[...]
Loading model parameters.
While copying the parameter named encoder.embeddings.make_embedding.emb_luts.0.weight, whose dimensions in the model are torch.Size([22, 500]) and whose dimensions in the checkpoint are torch.Size([22, 100]), ...
Traceback (most recent call last):
File "translate.py", line 133, in <module>
main()
File "translate.py", line 55, in main
translator = onmt.Translator(opt, dummy_opt.__dict__)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/Translator.py", line 29, in __init__
model_opt, self.fields, use_gpu(opt), checkpoint)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/ModelConstructor.py", line 166, in make_base_model
model.load_state_dict(checkpoint['model'])
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 360, in load_state_dict
own_state[name].copy_(param)
RuntimeError: inconsistent tensor size, expected tensor [22 x 500] and src [22 x 100] to have the same number of elements, but got 11000 and 2200 elements respectively at /home/pltrdy/pytorch/torch/lib/TH/generic/THTensorCopy.c:86
|
RuntimeError
|
def main():
# Load train and validate data.
print("Loading train and validate data from '%s'" % opt.data)
train = torch.load(opt.data + ".train.pt")
valid = torch.load(opt.data + ".valid.pt")
print(" * number of training sentences: %d" % len(train))
print(" * maximum batch size: %d" % opt.batch_size)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
print("Loading checkpoint from %s" % opt.train_from)
checkpoint = torch.load(
opt.train_from, map_location=lambda storage, loc: storage
)
model_opt = checkpoint["opt"]
# I don't like reassigning attributes of opt: it's not clear
opt.start_epoch = checkpoint["epoch"] + 1
else:
checkpoint = None
model_opt = opt
# Load fields generated from preprocess phase.
fields = load_fields(train, valid, checkpoint)
# Collect features.
src_features = collect_features(train, fields)
for j, feat in enumerate(src_features):
print(" * src feature %d size = %d" % (j, len(fields[feat].vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
tally_parameters(model)
check_save_model_path()
# Build optimizer.
optim = build_optim(model, checkpoint)
# Do training.
train_model(model, train, valid, fields, optim, model_opt)
|
def main():
# Load train and validate data.
print("Loading train and validate data from '%s'" % opt.data)
train = torch.load(opt.data + ".train.pt")
valid = torch.load(opt.data + ".valid.pt")
print(" * number of training sentences: %d" % len(train))
print(" * maximum batch size: %d" % opt.batch_size)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
print("Loading checkpoint from %s" % opt.train_from)
checkpoint = torch.load(
opt.train_from, map_location=lambda storage, loc: storage
)
model_opt = checkpoint["opt"]
# I don't like reassigning attributes of opt: it's not clear
opt.start_epoch = checkpoint["epoch"] + 1
else:
checkpoint = None
model_opt = opt
# Load fields generated from preprocess phase.
fields = load_fields(train, valid, checkpoint)
# Collect features.
src_features = collect_features(train, fields)
for j, feat in enumerate(src_features):
print(" * src feature %d size = %d" % (j, len(fields[feat].vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
tally_parameters(model)
check_save_model_path()
# Build optimizer.
optim = build_optim(model, checkpoint)
# Do training.
train_model(model, train, valid, fields, optim)
|
https://github.com/OpenNMT/OpenNMT-py/issues/394
|
./run.sh translate
[...]
Loading model parameters.
While copying the parameter named encoder.embeddings.make_embedding.emb_luts.0.weight, whose dimensions in the model are torch.Size([22, 500]) and whose dimensions in the checkpoint are torch.Size([22, 100]), ...
Traceback (most recent call last):
File "translate.py", line 133, in <module>
main()
File "translate.py", line 55, in main
translator = onmt.Translator(opt, dummy_opt.__dict__)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/Translator.py", line 29, in __init__
model_opt, self.fields, use_gpu(opt), checkpoint)
File "/home/pltrdy/pytorchwork/OpenNMT-py/onmt/ModelConstructor.py", line 166, in make_base_model
model.load_state_dict(checkpoint['model'])
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 360, in load_state_dict
own_state[name].copy_(param)
RuntimeError: inconsistent tensor size, expected tensor [22 x 500] and src [22 x 100] to have the same number of elements, but got 11000 and 2200 elements respectively at /home/pltrdy/pytorch/torch/lib/TH/generic/THTensorCopy.c:86
|
RuntimeError
|
def generate(
self,
input_ids=None,
max_length=None,
min_length=None,
do_sample=None,
early_stopping=None,
num_beams=None,
temperature=None,
top_k=None,
top_p=None,
repetition_penalty=None,
bad_words_ids=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
length_penalty=None,
no_repeat_ngram_size=None,
num_return_sequences=None,
attention_mask=None,
decoder_start_token_id=None,
use_cache=None,
):
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those config.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`tf.Tensor` of shape :obj:`(1,)`.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
temperature (:obj:`float`, `optional`, defaults to 1.0):
The value used to module the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(:obj:`List[int]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
model_specific_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
Return:
:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size * num_return_sequences,
sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to
:obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = (
early_stopping if early_stopping is not None else self.config.early_stopping
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = (
repetition_penalty
if repetition_penalty is not None
else self.config.repetition_penalty
)
bos_token_id = (
bos_token_id if bos_token_id is not None else self.config.bos_token_id
)
pad_token_id = (
pad_token_id if pad_token_id is not None else self.config.pad_token_id
)
eos_token_id = (
eos_token_id if eos_token_id is not None else self.config.eos_token_id
)
length_penalty = (
length_penalty if length_penalty is not None else self.config.length_penalty
)
no_repeat_ngram_size = (
no_repeat_ngram_size
if no_repeat_ngram_size is not None
else self.config.no_repeat_ngram_size
)
bad_words_ids = (
bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
)
num_return_sequences = (
num_return_sequences
if num_return_sequences is not None
else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = shape_list(input_ids)[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, (
"`max_length` should be a strictly positive integer."
)
assert isinstance(min_length, int) and min_length >= 0, (
"`min_length` should be a positive integer."
)
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, (
"`num_beams` should be a strictly positive integer."
)
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, (
"`top_k` should be a positive integer."
)
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert isinstance(num_return_sequences, int) and num_return_sequences > 0, (
"`num_return_sequences` should be a strictly positive integer."
)
assert (
bad_words_ids is None
or isinstance(bad_words_ids, list)
and isinstance(bad_words_ids[0], list)
), (
"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
)
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = tf.fill((batch_size, 1), bos_token_id)
else:
assert len(shape_list(input_ids)) == 2, (
"Input prompt should be of shape (batch_size, sequence length)."
)
# not allow to duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert num_return_sequences == 1, (
"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
)
else:
# beam_search greedy generation conditions
assert num_beams >= num_return_sequences, (
"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
)
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (
(attention_mask is None)
and (pad_token_id is not None)
and (pad_token_id in input_ids.numpy())
):
attention_mask = tf.cast(
tf.math.not_equal(input_ids, pad_token_id), dtype=tf.int32
)
elif attention_mask is None:
attention_mask = tf.ones_like(input_ids)
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(
eos_token_id
)
)
pad_token_id = eos_token_id
# current position and vocab size
cur_len = shape_list(input_ids)[1] # unused
vocab_size = self.config.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert decoder_start_token_id is not None, (
"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
)
assert hasattr(self, "get_encoder"), (
"{} should have a 'get_encoder' function defined".format(self)
)
assert callable(self.get_encoder), "{} should be a method".format(
self.get_encoder
)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = shape_list(input_ids)[-1]
input_ids = tf.broadcast_to(
tf.expand_dims(input_ids, 1),
(batch_size, effective_batch_mult * num_beams, input_ids_len),
)
attention_mask = tf.broadcast_to(
tf.expand_dims(attention_mask, 1),
(batch_size, effective_batch_mult * num_beams, input_ids_len),
)
input_ids = tf.reshape(
input_ids, (effective_batch_size * num_beams, input_ids_len)
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = tf.reshape(
attention_mask, (effective_batch_size * num_beams, input_ids_len)
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = (
tf.ones(
(effective_batch_size * num_beams, 1),
dtype=tf.int32,
)
* decoder_start_token_id
)
cur_len = 1
assert batch_size == encoder_outputs[0].shape[0], (
f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
)
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = tf.reshape(
tf.repeat(
tf.expand_dims(tf.range(batch_size), -1),
repeats=num_beams * effective_batch_mult,
axis=1,
),
shape=(-1,),
)
# expand encoder_outputs
encoder_outputs = (tf.gather(encoder_outputs[0], expanded_batch_idxs, axis=0),)
else:
encoder_outputs = None
cur_len = shape_list(input_ids)[-1]
assert cur_len < max_length, (
f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
)
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
)
return output
|
def generate(
self,
input_ids=None,
max_length=None,
min_length=None,
do_sample=None,
early_stopping=None,
num_beams=None,
temperature=None,
top_k=None,
top_p=None,
repetition_penalty=None,
bad_words_ids=None,
bos_token_id=None,
pad_token_id=None,
eos_token_id=None,
length_penalty=None,
no_repeat_ngram_size=None,
num_return_sequences=None,
attention_mask=None,
decoder_start_token_id=None,
use_cache=None,
):
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those config.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty
:obj:`tf.Tensor` of shape :obj:`(1,)`.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
temperature (:obj:`float`, `optional`, defaults to 1.0):
The value used to module the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(:obj:`List[int]`, `optional`):
List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
model_specific_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
Return:
:obj:`tf.Tensor` of :obj:`dtype=tf.int32` and shape :obj:`(batch_size * num_return_sequences,
sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to
:obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = TFAutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
input_context = 'My cute dog'
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='tf') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `TFOpenAIGPTLMHeadModel`, `TFXLNetLMHeadModel`, `TFGPT2LMHeadModel`, `TFCTRLLMHeadModel`, `TFT5ForConditionalGeneration`, `TFTransfoXLLMHeadModel`)"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = (
early_stopping if early_stopping is not None else self.config.early_stopping
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = (
repetition_penalty
if repetition_penalty is not None
else self.config.repetition_penalty
)
bos_token_id = (
bos_token_id if bos_token_id is not None else self.config.bos_token_id
)
pad_token_id = (
pad_token_id if pad_token_id is not None else self.config.pad_token_id
)
eos_token_id = (
eos_token_id if eos_token_id is not None else self.config.eos_token_id
)
length_penalty = (
length_penalty if length_penalty is not None else self.config.length_penalty
)
no_repeat_ngram_size = (
no_repeat_ngram_size
if no_repeat_ngram_size is not None
else self.config.no_repeat_ngram_size
)
bad_words_ids = (
bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
)
num_return_sequences = (
num_return_sequences
if num_return_sequences is not None
else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.config.decoder_start_token_id
)
if input_ids is not None:
batch_size = shape_list(input_ids)[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, (
"`max_length` should be a strictly positive integer."
)
assert isinstance(min_length, int) and min_length >= 0, (
"`min_length` should be a positive integer."
)
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, (
"`num_beams` should be a strictly positive integer."
)
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, (
"`top_k` should be a positive integer."
)
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert isinstance(num_return_sequences, int) and num_return_sequences > 0, (
"`num_return_sequences` should be a strictly positive integer."
)
assert (
bad_words_ids is None
or isinstance(bad_words_ids, list)
and isinstance(bad_words_ids[0], list)
), (
"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
)
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = tf.fill((batch_size, 1), bos_token_id)
else:
assert len(shape_list(input_ids)) == 2, (
"Input prompt should be of shape (batch_size, sequence length)."
)
# not allow to duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert num_return_sequences == 1, (
"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
)
else:
# beam_search greedy generation conditions
assert num_beams >= num_return_sequences, (
"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
)
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (
(attention_mask is None)
and (pad_token_id is not None)
and (pad_token_id in input_ids.numpy())
):
attention_mask = tf.cast(
tf.math.not_equal(input_ids, pad_token_id), dtype=tf.int32
)
elif attention_mask is None:
attention_mask = tf.ones_like(input_ids)
if pad_token_id is None and eos_token_id is not None:
logger.warning(
"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(
eos_token_id
)
)
pad_token_id = eos_token_id
# current position and vocab size
cur_len = shape_list(input_ids)[1] # unused
vocab_size = self.config.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
decoder_start_token_id = bos_token_id
assert decoder_start_token_id is not None, (
"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
)
assert hasattr(self, "get_encoder"), (
"{} should have a 'get_encoder' function defined".format(self)
)
assert callable(self.get_encoder), "{} should be a method".format(
self.get_encoder
)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = shape_list(input_ids)[-1]
input_ids = tf.broadcast_to(
tf.expand_dims(input_ids, 1),
(batch_size, effective_batch_mult * num_beams, input_ids_len),
)
attention_mask = tf.broadcast_to(
tf.expand_dims(attention_mask, 1),
(batch_size, effective_batch_mult * num_beams, input_ids_len),
)
input_ids = tf.reshape(
input_ids, (effective_batch_size * num_beams, input_ids_len)
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = tf.reshape(
attention_mask, (effective_batch_size * num_beams, input_ids_len)
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = (
tf.ones(
(effective_batch_size * num_beams, 1),
dtype=tf.int32,
)
* decoder_start_token_id
)
cur_len = 1
assert batch_size == encoder_outputs[0].shape[0], (
f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
)
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = tf.reshape(
tf.repeat(
tf.expand_dims(tf.range(batch_size), -1),
repeats=num_beams * effective_batch_mult,
axis=1,
),
shape=(-1,),
)
# expand encoder_outputs
encoder_outputs = (
tf.gather(encoder_outputs[0], expanded_batch_idxs, axis=0),
*encoder_outputs[1:],
)
else:
encoder_outputs = None
cur_len = shape_list(input_ids)[-1]
assert cur_len < max_length, (
f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
)
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
)
return output
|
https://github.com/huggingface/transformers/issues/8361
|
import transformers
model = transformers.TFT5ForConditionalGeneration.from_pretrained('t5-small', output_hidden_states=True, output_attentions=True)
tokenizer = transformers.T5Tokenizer.from_pretrained('t5-small')
input_ids = tokenizer.batch_encode_plus(['test 1', 'test 2', 'test 3'], return_tensors="tf", padding='longest')
output_ids = model.generate(input_ids['input_ids'], attention_mask=input_ids['attention_mask'])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/transformers/generation_tf_utils.py", line 405, in generate
use_cache=use_cache,
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/transformers/generation_tf_utils.py", line 445, in _generate_no_beam_search
outputs = self(**model_inputs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 985, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/transformers/modeling_tf_t5.py", line 1352, in call
training=training,
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 985, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/transformers/modeling_tf_t5.py", line 759, in call
training=training,
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 985, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/transformers/modeling_tf_t5.py", line 450, in call
assert len(past_key_value) == expected_num_past_key_values, error_message
AssertionError: There should be 4 past states. 2 (past / key) for self attention.2 (past / key) for cross attention Got 3 past key / value states
|
AssertionError
|
def _tf_glue_convert_examples_to_features(
examples: tf.data.Dataset,
tokenizer: PreTrainedTokenizer,
task=str,
max_length: Optional[int] = None,
) -> tf.data.Dataset:
"""
Returns:
A ``tf.data.Dataset`` containing the task-specific features.
"""
processor = glue_processors[task]()
examples = [
processor.tfds_map(processor.get_example_from_tensor_dict(example))
for example in examples
]
features = glue_convert_examples_to_features(
examples, tokenizer, max_length=max_length, task=task
)
def gen():
for ex in features:
d = {k: v for k, v in asdict(ex).items() if v is not None}
label = d.pop("label")
yield (d, label)
input_names = ["input_ids"] + tokenizer.model_input_names
return tf.data.Dataset.from_generator(
gen,
({k: tf.int32 for k in input_names}, tf.int64),
({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
)
|
def _tf_glue_convert_examples_to_features(
examples: tf.data.Dataset,
tokenizer: PreTrainedTokenizer,
task=str,
max_length: Optional[int] = None,
) -> tf.data.Dataset:
"""
Returns:
A ``tf.data.Dataset`` containing the task-specific features.
"""
processor = glue_processors[task]()
examples = [
processor.tfds_map(processor.get_example_from_tensor_dict(example))
for example in examples
]
features = glue_convert_examples_to_features(
examples, tokenizer, max_length=max_length, task=task
)
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
(
{
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
},
tf.int64,
),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
|
https://github.com/huggingface/transformers/issues/4856
|
Traceback (most recent call last):
File "run_glue.py", line 229, in <module>
main()
File "run_glue.py", line 199, in main
compute_metrics=compute_metrics,
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 48, in __init__
self._setup_training()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 58, in _setup_training
self._prepare_dataset()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 95, in _prepare_dataset
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 1934, in reduce
output_types=structure.get_flat_tensor_types(state_structure)))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py", line 4661, in reduce_dataset
_ops.raise_from_not_ok_status(e, name)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 6606, in raise_from_not_ok_status
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 236, in __call__
ret = func(*args)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 810, in generator_py_func
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
[[{{node PyFunc}}]] [Op:ReduceDataset]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def gen():
for ex in features:
d = {k: v for k, v in asdict(ex).items() if v is not None}
label = d.pop("label")
yield (d, label)
|
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
|
https://github.com/huggingface/transformers/issues/4856
|
Traceback (most recent call last):
File "run_glue.py", line 229, in <module>
main()
File "run_glue.py", line 199, in main
compute_metrics=compute_metrics,
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 48, in __init__
self._setup_training()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 58, in _setup_training
self._prepare_dataset()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 95, in _prepare_dataset
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 1934, in reduce
output_types=structure.get_flat_tensor_types(state_structure)))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py", line 4661, in reduce_dataset
_ops.raise_from_not_ok_status(e, name)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 6606, in raise_from_not_ok_status
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 236, in __call__
ret = func(*args)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 810, in generator_py_func
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
[[{{node PyFunc}}]] [Op:ReduceDataset]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def squad_convert_examples_to_features(
examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
return_dataset=False,
threads=1,
tqdm_enabled=True,
):
"""
Converts a list of examples into a list of features that can be directly given as input to a model.
It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.
Args:
examples: list of :class:`~transformers.data.processors.squad.SquadExample`
tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`
max_seq_length: The maximum sequence length of the inputs.
doc_stride: The stride used when the context is too large and is split across several features.
max_query_length: The maximum length of the query.
is_training: whether to create features for model evaluation or model training.
return_dataset: Default False. Either 'pt' or 'tf'.
if 'pt': returns a torch.data.TensorDataset,
if 'tf': returns a tf.data.Dataset
threads: multiple processing threadsa-smi
Returns:
list of :class:`~transformers.data.processors.squad.SquadFeatures`
Example::
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
)
"""
# Defining helper methods
features = []
threads = min(threads, cpu_count())
with Pool(
threads,
initializer=squad_convert_example_to_features_init,
initargs=(tokenizer,),
) as p:
annotate_ = partial(
squad_convert_example_to_features,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
)
features = list(
tqdm(
p.imap(annotate_, examples, chunksize=32),
total=len(examples),
desc="convert squad examples to features",
disable=not tqdm_enabled,
)
)
new_features = []
unique_id = 1000000000
example_index = 0
for example_features in tqdm(
features,
total=len(features),
desc="add example index and unique id",
disable=not tqdm_enabled,
):
if not example_features:
continue
for example_feature in example_features:
example_feature.example_index = example_index
example_feature.unique_id = unique_id
new_features.append(example_feature)
unique_id += 1
example_index += 1
features = new_features
del new_features
if return_dataset == "pt":
if not is_torch_available():
raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_masks = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long
)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long
)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
all_is_impossible = torch.tensor(
[f.is_impossible for f in features], dtype=torch.float
)
if not is_training:
all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_feature_index,
all_cls_index,
all_p_mask,
)
else:
all_start_positions = torch.tensor(
[f.start_position for f in features], dtype=torch.long
)
all_end_positions = torch.tensor(
[f.end_position for f in features], dtype=torch.long
)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_start_positions,
all_end_positions,
all_cls_index,
all_p_mask,
all_is_impossible,
)
return features, dataset
elif return_dataset == "tf":
if not is_tf_available():
raise RuntimeError(
"TensorFlow must be installed to return a TensorFlow dataset."
)
def gen():
for i, ex in enumerate(features):
if ex.token_type_ids is None:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"feature_index": i,
"qas_id": ex.qas_id,
},
{
"start_positions": ex.start_position,
"end_positions": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
"feature_index": i,
"qas_id": ex.qas_id,
},
{
"start_positions": ex.start_position,
"end_positions": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
# Why have we split the batch into a tuple? PyTorch just has a list of tensors.
if "token_type_ids" in tokenizer.model_input_names:
train_types = (
{
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
"feature_index": tf.int64,
"qas_id": tf.string,
},
{
"start_positions": tf.int64,
"end_positions": tf.int64,
"cls_index": tf.int64,
"p_mask": tf.int32,
"is_impossible": tf.int32,
},
)
train_shapes = (
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
"feature_index": tf.TensorShape([]),
"qas_id": tf.TensorShape([]),
},
{
"start_positions": tf.TensorShape([]),
"end_positions": tf.TensorShape([]),
"cls_index": tf.TensorShape([]),
"p_mask": tf.TensorShape([None]),
"is_impossible": tf.TensorShape([]),
},
)
else:
train_types = (
{
"input_ids": tf.int32,
"attention_mask": tf.int32,
"feature_index": tf.int64,
"qas_id": tf.string,
},
{
"start_positions": tf.int64,
"end_positions": tf.int64,
"cls_index": tf.int64,
"p_mask": tf.int32,
"is_impossible": tf.int32,
},
)
train_shapes = (
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"feature_index": tf.TensorShape([]),
"qas_id": tf.TensorShape([]),
},
{
"start_positions": tf.TensorShape([]),
"end_positions": tf.TensorShape([]),
"cls_index": tf.TensorShape([]),
"p_mask": tf.TensorShape([None]),
"is_impossible": tf.TensorShape([]),
},
)
return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
else:
return features
|
def squad_convert_examples_to_features(
examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
return_dataset=False,
threads=1,
tqdm_enabled=True,
):
"""
Converts a list of examples into a list of features that can be directly given as input to a model.
It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.
Args:
examples: list of :class:`~transformers.data.processors.squad.SquadExample`
tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`
max_seq_length: The maximum sequence length of the inputs.
doc_stride: The stride used when the context is too large and is split across several features.
max_query_length: The maximum length of the query.
is_training: whether to create features for model evaluation or model training.
return_dataset: Default False. Either 'pt' or 'tf'.
if 'pt': returns a torch.data.TensorDataset,
if 'tf': returns a tf.data.Dataset
threads: multiple processing threadsa-smi
Returns:
list of :class:`~transformers.data.processors.squad.SquadFeatures`
Example::
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
)
"""
# Defining helper methods
features = []
threads = min(threads, cpu_count())
with Pool(
threads,
initializer=squad_convert_example_to_features_init,
initargs=(tokenizer,),
) as p:
annotate_ = partial(
squad_convert_example_to_features,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
)
features = list(
tqdm(
p.imap(annotate_, examples, chunksize=32),
total=len(examples),
desc="convert squad examples to features",
disable=not tqdm_enabled,
)
)
new_features = []
unique_id = 1000000000
example_index = 0
for example_features in tqdm(
features,
total=len(features),
desc="add example index and unique id",
disable=not tqdm_enabled,
):
if not example_features:
continue
for example_feature in example_features:
example_feature.example_index = example_index
example_feature.unique_id = unique_id
new_features.append(example_feature)
unique_id += 1
example_index += 1
features = new_features
del new_features
if return_dataset == "pt":
if not is_torch_available():
raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_masks = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long
)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long
)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
all_is_impossible = torch.tensor(
[f.is_impossible for f in features], dtype=torch.float
)
if not is_training:
all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_feature_index,
all_cls_index,
all_p_mask,
)
else:
all_start_positions = torch.tensor(
[f.start_position for f in features], dtype=torch.long
)
all_end_positions = torch.tensor(
[f.end_position for f in features], dtype=torch.long
)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_start_positions,
all_end_positions,
all_cls_index,
all_p_mask,
all_is_impossible,
)
return features, dataset
elif return_dataset == "tf":
if not is_tf_available():
raise RuntimeError(
"TensorFlow must be installed to return a TensorFlow dataset."
)
def gen():
for i, ex in enumerate(features):
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
"feature_index": i,
"qas_id": ex.qas_id,
},
{
"start_positions": ex.start_position,
"end_positions": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
# Why have we split the batch into a tuple? PyTorch just has a list of tensors.
train_types = (
{
"input_ids": tf.int32,
"attention_mask": tf.int32,
"token_type_ids": tf.int32,
"feature_index": tf.int64,
"qas_id": tf.string,
},
{
"start_positions": tf.int64,
"end_positions": tf.int64,
"cls_index": tf.int64,
"p_mask": tf.int32,
"is_impossible": tf.int32,
},
)
train_shapes = (
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
"feature_index": tf.TensorShape([]),
"qas_id": tf.TensorShape([]),
},
{
"start_positions": tf.TensorShape([]),
"end_positions": tf.TensorShape([]),
"cls_index": tf.TensorShape([]),
"p_mask": tf.TensorShape([None]),
"is_impossible": tf.TensorShape([]),
},
)
return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
else:
return features
|
https://github.com/huggingface/transformers/issues/4856
|
Traceback (most recent call last):
File "run_glue.py", line 229, in <module>
main()
File "run_glue.py", line 199, in main
compute_metrics=compute_metrics,
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 48, in __init__
self._setup_training()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 58, in _setup_training
self._prepare_dataset()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 95, in _prepare_dataset
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 1934, in reduce
output_types=structure.get_flat_tensor_types(state_structure)))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py", line 4661, in reduce_dataset
_ops.raise_from_not_ok_status(e, name)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 6606, in raise_from_not_ok_status
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 236, in __call__
ret = func(*args)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 810, in generator_py_func
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
[[{{node PyFunc}}]] [Op:ReduceDataset]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def gen():
for i, ex in enumerate(features):
if ex.token_type_ids is None:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"feature_index": i,
"qas_id": ex.qas_id,
},
{
"start_positions": ex.start_position,
"end_positions": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
"feature_index": i,
"qas_id": ex.qas_id,
},
{
"start_positions": ex.start_position,
"end_positions": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
|
def gen():
for i, ex in enumerate(features):
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
"feature_index": i,
"qas_id": ex.qas_id,
},
{
"start_positions": ex.start_position,
"end_positions": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
|
https://github.com/huggingface/transformers/issues/4856
|
Traceback (most recent call last):
File "run_glue.py", line 229, in <module>
main()
File "run_glue.py", line 199, in main
compute_metrics=compute_metrics,
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 48, in __init__
self._setup_training()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 58, in _setup_training
self._prepare_dataset()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/transformers/trainer_tf.py", line 95, in _prepare_dataset
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 1934, in reduce
output_types=structure.get_flat_tensor_types(state_structure)))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_dataset_ops.py", line 4661, in reduce_dataset
_ops.raise_from_not_ok_status(e, name)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 6606, in raise_from_not_ok_status
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 236, in __call__
ret = func(*args)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 810, in generator_py_func
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 805, in generator_py_func
ret, dtype=dtype.as_numpy_dtype))
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow_core/python/ops/script_ops.py", line 196, in _convert
result = np.asarray(value, dtype=dtype, order="C")
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/numpy/core/_asarray.py", line 85, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: `generator` yielded an element that could not be converted to the expected type. The expected type was int32, but the yielded element was None.
[[{{node PyFunc}}]] [Op:ReduceDataset]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the base model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5Model` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertModel` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertModel` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertModel` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaModel` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaModel` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertModel` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2Model` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.Flaubert` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_MAPPING.keys()),
)
)
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the base model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The base model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5Model` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertModel` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertModel` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertModel` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaModel` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaModel` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertModel` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2Model` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.Flaubert` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModel.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_MAPPING.keys()),
)
)
|
https://github.com/huggingface/transformers/issues/2985
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/transformers/src/transformers/modeling_auto.py", line 384, in from_pretrained
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
File "[...]/transformers/src/transformers/modeling_utils.py", line 463, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
TypeError: __init__() got an unexpected keyword argument 'output_attention'
|
TypeError
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the model classes of the library -with the architecture used for pretraining this model– from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5ModelWithLMHead` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForPreTraining` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForPreTraining.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForPreTraining.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys()),
)
)
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the model classes of the library -with the architecture used for pretraining this model– from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5ModelWithLMHead` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForPreTraining` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model.
(e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelForPreTraining.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForPreTraining.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys()),
)
)
|
https://github.com/huggingface/transformers/issues/2985
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/transformers/src/transformers/modeling_auto.py", line 384, in from_pretrained
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
File "[...]/transformers/src/transformers/modeling_utils.py", line 463, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
TypeError: __init__() got an unexpected keyword argument 'output_attention'
|
TypeError
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the language modeling model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5ModelWithLMHead` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForMaskedLM` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys()),
)
)
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the language modeling model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `t5`: :class:`~transformers.T5ModelWithLMHead` (T5 model)
- contains `distilbert`: :class:`~transformers.DistilBertForMaskedLM` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForMaskedLM` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForMaskedLM` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForMaskedLM` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForMaskedLM` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForMaskedLM` (Bert model)
- contains `openai-gpt`: :class:`~transformers.OpenAIGPTLMHeadModel` (OpenAI GPT model)
- contains `gpt2`: :class:`~transformers.GPT2LMHeadModel` (OpenAI GPT-2 model)
- contains `transfo-xl`: :class:`~transformers.TransfoXLLMHeadModel` (Transformer-XL model)
- contains `xlnet`: :class:`~transformers.XLNetLMHeadModel` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMWithLMHeadModel` (XLM model)
- contains `ctrl`: :class:`~transformers.CTRLLMHeadModel` (Salesforce CTRL model)
- contains `flaubert`: :class:`~transformers.FlaubertWithLMHeadModel` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely received file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model.
(e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys()),
)
)
|
https://github.com/huggingface/transformers/issues/2985
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/transformers/src/transformers/modeling_auto.py", line 384, in from_pretrained
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
File "[...]/transformers/src/transformers/modeling_utils.py", line 463, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
TypeError: __init__() got an unexpected keyword argument 'output_attention'
|
TypeError
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the sequence classification model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForSequenceClassification` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)
- contains `flaubert`: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(
c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()
),
)
)
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the sequence classification model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForSequenceClassification` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForSequenceClassification` (ALBERT model)
- contains `camembert`: :class:`~transformers.CamembertForSequenceClassification` (CamemBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForSequenceClassification` (XLM-RoBERTa model)
- contains `roberta`: :class:`~transformers.RobertaForSequenceClassification` (RoBERTa model)
- contains `bert`: :class:`~transformers.BertForSequenceClassification` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForSequenceClassification` (XLNet model)
- contains `flaubert`: :class:`~transformers.FlaubertForSequenceClassification` (Flaubert model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(
c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()
),
)
)
|
https://github.com/huggingface/transformers/issues/2985
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/transformers/src/transformers/modeling_auto.py", line 384, in from_pretrained
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
File "[...]/transformers/src/transformers/modeling_utils.py", line 463, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
TypeError: __init__() got an unexpected keyword argument 'output_attention'
|
TypeError
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForQuestionAnswering` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForQuestionAnswering` (ALBERT model)
- contains `bert`: :class:`~transformers.BertForQuestionAnswering` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForQuestionAnswering` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMForQuestionAnswering` (XLM model)
- contains `flaubert`: :class:`~transformers.FlaubertForQuestionAnswering` (XLM model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForQuestionAnswering` (DistilBERT model)
- contains `albert`: :class:`~transformers.AlbertForQuestionAnswering` (ALBERT model)
- contains `bert`: :class:`~transformers.BertForQuestionAnswering` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForQuestionAnswering` (XLNet model)
- contains `xlm`: :class:`~transformers.XLMForQuestionAnswering` (XLM model)
- contains `flaubert`: :class:`~transformers.FlaubertForQuestionAnswering` (XLM model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
|
https://github.com/huggingface/transformers/issues/2985
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/transformers/src/transformers/modeling_auto.py", line 384, in from_pretrained
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
File "[...]/transformers/src/transformers/modeling_utils.py", line 463, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
TypeError: __init__() got an unexpected keyword argument 'output_attention'
|
TypeError
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForTokenClassification` (DistilBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForTokenClassification` (XLM-RoBERTa?Para model)
- contains `camembert`: :class:`~transformers.CamembertForTokenClassification` (Camembert model)
- contains `bert`: :class:`~transformers.BertForTokenClassification` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForTokenClassification` (XLNet model)
- contains `roberta`: :class:`~transformers.RobertaForTokenClassification` (Roberta model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
These arguments will be passed to the configuration and the model.
Examples::
model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForTokenClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(
c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()
),
)
)
|
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiates one of the question answering model classes of the library
from a pre-trained model configuration.
The `from_pretrained()` method takes care of returning the correct model class instance
based on the `model_type` property of the config object, or when it's missing,
falling back to using pattern matching on the `pretrained_model_name_or_path` string.
The model class to instantiate is selected as the first pattern matching
in the `pretrained_model_name_or_path` string (in the following order):
- contains `distilbert`: :class:`~transformers.DistilBertForTokenClassification` (DistilBERT model)
- contains `xlm-roberta`: :class:`~transformers.XLMRobertaForTokenClassification` (XLM-RoBERTa?Para model)
- contains `camembert`: :class:`~transformers.CamembertForTokenClassification` (Camembert model)
- contains `bert`: :class:`~transformers.BertForTokenClassification` (Bert model)
- contains `xlnet`: :class:`~transformers.XLNetForTokenClassification` (XLNet model)
- contains `roberta`: :class:`~transformers.RobertaForTokenClassification` (Roberta model)
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Args:
pretrained_model_name_or_path:
Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = AutoModelForTokenClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(
pretrained_model_name_or_path, *model_args, config=config, **kwargs
)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(
c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()
),
)
)
|
https://github.com/huggingface/transformers/issues/2985
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/transformers/src/transformers/modeling_auto.py", line 384, in from_pretrained
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
File "[...]/transformers/src/transformers/modeling_utils.py", line 463, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
TypeError: __init__() got an unexpected keyword argument 'output_attention'
|
TypeError
|
def start(self, wait=60, *, server_settings={}, **opts):
"""Start the cluster."""
status = self.get_status()
if status == "running":
return
elif status == "not-initialized":
raise ClusterError(
"cluster in {!r} has not been initialized".format(self._data_dir)
)
port = opts.pop("port", None)
if port == "dynamic":
port = find_available_port()
extra_args = ["--{}={}".format(k, v) for k, v in opts.items()]
extra_args.append("--port={}".format(port))
sockdir = server_settings.get("unix_socket_directories")
if sockdir is None:
sockdir = server_settings.get("unix_socket_directory")
if sockdir is None:
sockdir = "/tmp"
ssl_key = server_settings.get("ssl_key_file")
if ssl_key:
# Make sure server certificate key file has correct permissions.
keyfile = os.path.join(self._data_dir, "srvkey.pem")
shutil.copy(ssl_key, keyfile)
os.chmod(keyfile, 0o600)
server_settings = server_settings.copy()
server_settings["ssl_key_file"] = keyfile
if self._pg_version < (9, 3):
sockdir_opt = "unix_socket_directory"
else:
sockdir_opt = "unix_socket_directories"
server_settings[sockdir_opt] = sockdir
for k, v in server_settings.items():
extra_args.extend(["-c", "{}={}".format(k, v)])
if _system == "Windows":
# On Windows we have to use pg_ctl as direct execution
# of postgres daemon under an Administrative account
# is not permitted and there is no easy way to drop
# privileges.
if os.getenv("ASYNCPG_DEBUG_SERVER"):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
process = subprocess.run(
[self._pg_ctl, "start", "-D", self._data_dir, "-o", " ".join(extra_args)],
stdout=stdout,
stderr=subprocess.STDOUT,
)
if process.returncode != 0:
if process.stderr:
stderr = ":\n{}".format(process.stderr.decode())
else:
stderr = ""
raise ClusterError(
"pg_ctl start exited with status {:d}{}".format(
process.returncode, stderr
)
)
else:
if os.getenv("ASYNCPG_DEBUG_SERVER"):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
self._daemon_process = subprocess.Popen(
[self._postgres, "-D", self._data_dir, *extra_args],
stdout=stdout,
stderr=subprocess.STDOUT,
preexec_fn=ensure_dead_with_parent,
)
self._daemon_pid = self._daemon_process.pid
self._test_connection(timeout=wait)
|
def start(self, wait=60, *, server_settings={}, **opts):
"""Start the cluster."""
status = self.get_status()
if status == "running":
return
elif status == "not-initialized":
raise ClusterError(
"cluster in {!r} has not been initialized".format(self._data_dir)
)
port = opts.pop("port", None)
if port == "dynamic":
port = find_available_port()
extra_args = ["--{}={}".format(k, v) for k, v in opts.items()]
extra_args.append("--port={}".format(port))
sockdir = server_settings.get("unix_socket_directories")
if sockdir is None:
sockdir = server_settings.get("unix_socket_directory")
if sockdir is None:
sockdir = "/tmp"
ssl_key = server_settings.get("ssl_key_file")
if ssl_key:
# Make sure server certificate key file has correct permissions.
keyfile = os.path.join(self._data_dir, "srvkey.pem")
shutil.copy(ssl_key, keyfile)
os.chmod(keyfile, 0o400)
server_settings = server_settings.copy()
server_settings["ssl_key_file"] = keyfile
if self._pg_version < (9, 3):
sockdir_opt = "unix_socket_directory"
else:
sockdir_opt = "unix_socket_directories"
server_settings[sockdir_opt] = sockdir
for k, v in server_settings.items():
extra_args.extend(["-c", "{}={}".format(k, v)])
if _system == "Windows":
# On Windows we have to use pg_ctl as direct execution
# of postgres daemon under an Administrative account
# is not permitted and there is no easy way to drop
# privileges.
process = subprocess.run(
[self._pg_ctl, "start", "-D", self._data_dir, "-o", " ".join(extra_args)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
stderr = process.stderr
if process.returncode != 0:
raise ClusterError(
"pg_ctl start exited with status {:d}: {}".format(
process.returncode, stderr.decode()
)
)
else:
if os.getenv("ASYNCPG_DEBUG_SERVER"):
stdout = sys.stdout
else:
stdout = subprocess.DEVNULL
self._daemon_process = subprocess.Popen(
[self._postgres, "-D", self._data_dir, *extra_args],
stdout=stdout,
stderr=subprocess.STDOUT,
preexec_fn=ensure_dead_with_parent,
)
self._daemon_pid = self._daemon_process.pid
self._test_connection(timeout=wait)
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
def __init__(
self,
protocol,
transport,
loop,
addr: (str, int) or str,
config: connect_utils._ClientConfiguration,
params: connect_utils._ConnectionParameters,
):
self._protocol = protocol
self._transport = transport
self._loop = loop
self._top_xact = None
self._aborted = False
# Incremented very time the connection is released back to a pool.
# Used to catch invalid references to connection-related resources
# post-release (e.g. explicit prepared statements).
self._pool_release_ctr = 0
self._addr = addr
self._config = config
self._params = params
self._stmt_cache = _StatementCache(
loop=loop,
max_size=config.statement_cache_size,
on_remove=self._maybe_gc_stmt,
max_lifetime=config.max_cached_statement_lifetime,
)
self._stmts_to_close = set()
self._listeners = {}
self._log_listeners = set()
self._cancellations = set()
settings = self._protocol.get_settings()
ver_string = settings.server_version
self._server_version = serverversion.split_server_version_string(ver_string)
self._server_caps = _detect_server_capabilities(self._server_version, settings)
self._intro_query = introspection.INTRO_LOOKUP_TYPES
self._reset_query = None
self._proxy = None
# Used to serialize operations that might involve anonymous
# statements. Specifically, we want to make the following
# operation atomic:
# ("prepare an anonymous statement", "use the statement")
#
# Used for `con.fetchval()`, `con.fetch()`, `con.fetchrow()`,
# `con.execute()`, and `con.executemany()`.
self._stmt_exclusive_section = _Atomic()
|
def __init__(
self,
protocol,
transport,
loop,
addr: (str, int) or str,
config: connect_utils._ClientConfiguration,
params: connect_utils._ConnectionParameters,
):
self._protocol = protocol
self._transport = transport
self._loop = loop
self._top_xact = None
self._aborted = False
# Incremented very time the connection is released back to a pool.
# Used to catch invalid references to connection-related resources
# post-release (e.g. explicit prepared statements).
self._pool_release_ctr = 0
self._addr = addr
self._config = config
self._params = params
self._stmt_cache = _StatementCache(
loop=loop,
max_size=config.statement_cache_size,
on_remove=self._maybe_gc_stmt,
max_lifetime=config.max_cached_statement_lifetime,
)
self._stmts_to_close = set()
self._listeners = {}
self._log_listeners = set()
settings = self._protocol.get_settings()
ver_string = settings.server_version
self._server_version = serverversion.split_server_version_string(ver_string)
self._server_caps = _detect_server_capabilities(self._server_version, settings)
self._intro_query = introspection.INTRO_LOOKUP_TYPES
self._reset_query = None
self._proxy = None
# Used to serialize operations that might involve anonymous
# statements. Specifically, we want to make the following
# operation atomic:
# ("prepare an anonymous statement", "use the statement")
#
# Used for `con.fetchval()`, `con.fetch()`, `con.fetchrow()`,
# `con.execute()`, and `con.executemany()`.
self._stmt_exclusive_section = _Atomic()
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
async def close(self, *, timeout=None):
"""Close the connection gracefully.
:param float timeout:
Optional timeout value in seconds.
.. versionchanged:: 0.14.0
Added the *timeout* parameter.
"""
if self.is_closed():
return
self._mark_stmts_as_closed()
self._listeners.clear()
self._log_listeners.clear()
self._aborted = True
try:
await self._protocol.close(timeout)
except Exception:
# If we fail to close gracefully, abort the connection.
self._aborted = True
self._protocol.abort()
raise
finally:
self._clean_tasks()
|
async def close(self):
"""Close the connection gracefully."""
if self.is_closed():
return
self._mark_stmts_as_closed()
self._listeners.clear()
self._log_listeners.clear()
self._aborted = True
await self._protocol.close()
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
def terminate(self):
"""Terminate the connection without waiting for pending data."""
self._mark_stmts_as_closed()
self._listeners.clear()
self._log_listeners.clear()
self._aborted = True
self._protocol.abort()
self._clean_tasks()
|
def terminate(self):
"""Terminate the connection without waiting for pending data."""
self._mark_stmts_as_closed()
self._listeners.clear()
self._log_listeners.clear()
self._aborted = True
self._protocol.abort()
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.